applied-ai-018 committed
Commit 596e894 · verified · 1 Parent(s): bd32e04

Add files using upload-large-folder tool
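For context, the "upload-large-folder tool" named in the commit message is normally driven through the huggingface_hub client. A minimal sketch is shown below; the repo id and local path are placeholders, and the exact keyword arguments are an assumption based on the public huggingface_hub API rather than anything recorded in this commit.

# Hypothetical invocation of the upload-large-folder workflow via huggingface_hub.
# repo_id and folder_path are placeholders, not taken from this commit.
from huggingface_hub import HfApi

api = HfApi()
api.upload_large_folder(
    repo_id="applied-ai-018/example-repo",  # placeholder repo id
    repo_type="model",                      # assumed required by upload_large_folder
    folder_path="./llmeval-env",            # local folder mirrored into the repo
)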

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. .gitattributes +1 -0
  2. llmeval-env/lib/python3.10/site-packages/numpy.libs/libopenblas64_p-r0-0cf96a72.3.23.dev.so +3 -0
  3. llmeval-env/lib/python3.10/site-packages/sklearn/covariance/__pycache__/__init__.cpython-310.pyc +0 -0
  4. llmeval-env/lib/python3.10/site-packages/sklearn/covariance/__pycache__/_elliptic_envelope.cpython-310.pyc +0 -0
  5. llmeval-env/lib/python3.10/site-packages/sklearn/covariance/__pycache__/_empirical_covariance.cpython-310.pyc +0 -0
  6. llmeval-env/lib/python3.10/site-packages/sklearn/covariance/__pycache__/_graph_lasso.cpython-310.pyc +0 -0
  7. llmeval-env/lib/python3.10/site-packages/sklearn/covariance/__pycache__/_robust_covariance.cpython-310.pyc +0 -0
  8. llmeval-env/lib/python3.10/site-packages/sklearn/covariance/__pycache__/_shrunk_covariance.cpython-310.pyc +0 -0
  9. llmeval-env/lib/python3.10/site-packages/sklearn/covariance/_empirical_covariance.py +364 -0
  10. llmeval-env/lib/python3.10/site-packages/sklearn/covariance/tests/__init__.py +0 -0
  11. llmeval-env/lib/python3.10/site-packages/sklearn/covariance/tests/__pycache__/__init__.cpython-310.pyc +0 -0
  12. llmeval-env/lib/python3.10/site-packages/sklearn/covariance/tests/__pycache__/test_covariance.cpython-310.pyc +0 -0
  13. llmeval-env/lib/python3.10/site-packages/sklearn/covariance/tests/__pycache__/test_elliptic_envelope.cpython-310.pyc +0 -0
  14. llmeval-env/lib/python3.10/site-packages/sklearn/covariance/tests/__pycache__/test_graphical_lasso.cpython-310.pyc +0 -0
  15. llmeval-env/lib/python3.10/site-packages/sklearn/covariance/tests/__pycache__/test_robust_covariance.cpython-310.pyc +0 -0
  16. llmeval-env/lib/python3.10/site-packages/sklearn/covariance/tests/test_covariance.py +377 -0
  17. llmeval-env/lib/python3.10/site-packages/sklearn/covariance/tests/test_elliptic_envelope.py +52 -0
  18. llmeval-env/lib/python3.10/site-packages/sklearn/covariance/tests/test_graphical_lasso.py +286 -0
  19. llmeval-env/lib/python3.10/site-packages/sklearn/covariance/tests/test_robust_covariance.py +171 -0
  20. llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/__init__.py +44 -0
  21. llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/__pycache__/_base.cpython-310.pyc +0 -0
  22. llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/__pycache__/_forest.cpython-310.pyc +0 -0
  23. llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/__pycache__/_gb.cpython-310.pyc +0 -0
  24. llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/__pycache__/_iforest.cpython-310.pyc +0 -0
  25. llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/__pycache__/_stacking.cpython-310.pyc +0 -0
  26. llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/__pycache__/_voting.cpython-310.pyc +0 -0
  27. llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/_bagging.py +1242 -0
  28. llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/_base.py +301 -0
  29. llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/_forest.py +0 -0
  30. llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/_gb.py +2168 -0
  31. llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/_gradient_boosting.cpython-310-x86_64-linux-gnu.so +0 -0
  32. llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/__init__.py +5 -0
  33. llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/_binning.cpython-310-x86_64-linux-gnu.so +0 -0
  34. llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/_bitset.cpython-310-x86_64-linux-gnu.so +0 -0
  35. llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/_bitset.pxd +18 -0
  36. llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/_gradient_boosting.cpython-310-x86_64-linux-gnu.so +0 -0
  37. llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/_predictor.cpython-310-x86_64-linux-gnu.so +0 -0
  38. llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/binning.py +321 -0
  39. llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/common.cpython-310-x86_64-linux-gnu.so +0 -0
  40. llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/common.pxd +44 -0
  41. llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/gradient_boosting.py +2270 -0
  42. llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/grower.py +798 -0
  43. llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/histogram.cpython-310-x86_64-linux-gnu.so +0 -0
  44. llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/predictor.py +144 -0
  45. llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/splitting.cpython-310-x86_64-linux-gnu.so +0 -0
  46. llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__init__.py +0 -0
  47. llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/test_binning.py +489 -0
  48. llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/test_bitset.py +64 -0
  49. llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/test_compare_lightgbm.py +279 -0
  50. llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/test_gradient_boosting.py +1683 -0
.gitattributes CHANGED
@@ -84,3 +84,4 @@ llmeval-env/lib/python3.10/site-packages/lxml/etree.cpython-310-x86_64-linux-gnu
84
  llmeval-env/lib/python3.10/site-packages/numpy.libs/libgfortran-040039e1.so.5.0.0 filter=lfs diff=lfs merge=lfs -text
85
  llmeval-env/lib/python3.10/site-packages/lxml/objectify.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
86
  llmeval-env/lib/python3.10/site-packages/tokenizers/tokenizers.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
87
+ llmeval-env/lib/python3.10/site-packages/numpy.libs/libopenblas64_p-r0-0cf96a72.3.23.dev.so filter=lfs diff=lfs merge=lfs -text
llmeval-env/lib/python3.10/site-packages/numpy.libs/libopenblas64_p-r0-0cf96a72.3.23.dev.so ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9254d0854dd7615e11de28d771ae408878ca8123a7ac204f21e4cc7a376cc2e5
3
+ size 35123345
llmeval-env/lib/python3.10/site-packages/sklearn/covariance/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.16 kB).
 
llmeval-env/lib/python3.10/site-packages/sklearn/covariance/__pycache__/_elliptic_envelope.cpython-310.pyc ADDED
Binary file (9.56 kB).
 
llmeval-env/lib/python3.10/site-packages/sklearn/covariance/__pycache__/_empirical_covariance.cpython-310.pyc ADDED
Binary file (11.4 kB).
 
llmeval-env/lib/python3.10/site-packages/sklearn/covariance/__pycache__/_graph_lasso.cpython-310.pyc ADDED
Binary file (30.5 kB).
 
llmeval-env/lib/python3.10/site-packages/sklearn/covariance/__pycache__/_robust_covariance.cpython-310.pyc ADDED
Binary file (24.2 kB).
 
llmeval-env/lib/python3.10/site-packages/sklearn/covariance/__pycache__/_shrunk_covariance.cpython-310.pyc ADDED
Binary file (24.1 kB).
 
llmeval-env/lib/python3.10/site-packages/sklearn/covariance/_empirical_covariance.py ADDED
@@ -0,0 +1,364 @@
1
+ """
2
+ Maximum likelihood covariance estimator.
3
+
4
+ """
5
+
6
+ # Author: Alexandre Gramfort <[email protected]>
7
+ # Gael Varoquaux <[email protected]>
8
+ # Virgile Fritsch <[email protected]>
9
+ #
10
+ # License: BSD 3 clause
11
+
12
+ # avoid division truncation
13
+ import warnings
14
+
15
+ import numpy as np
16
+ from scipy import linalg
17
+
18
+ from .. import config_context
19
+ from ..base import BaseEstimator, _fit_context
20
+ from ..metrics.pairwise import pairwise_distances
21
+ from ..utils import check_array
22
+ from ..utils._param_validation import validate_params
23
+ from ..utils.extmath import fast_logdet
24
+
25
+
26
+ @validate_params(
27
+ {
28
+ "emp_cov": [np.ndarray],
29
+ "precision": [np.ndarray],
30
+ },
31
+ prefer_skip_nested_validation=True,
32
+ )
33
+ def log_likelihood(emp_cov, precision):
34
+ """Compute the sample mean of the log_likelihood under a covariance model.
35
+
36
+ Computes the empirical expected log-likelihood, allowing for universal
37
+ comparison (beyond this software package), and accounts for normalization
38
+ terms and scaling.
39
+
40
+ Parameters
41
+ ----------
42
+ emp_cov : ndarray of shape (n_features, n_features)
43
+ Maximum Likelihood Estimator of covariance.
44
+
45
+ precision : ndarray of shape (n_features, n_features)
46
+ The precision matrix of the covariance model to be tested.
47
+
48
+ Returns
49
+ -------
50
+ log_likelihood_ : float
51
+ Sample mean of the log-likelihood.
52
+ """
53
+ p = precision.shape[0]
54
+ log_likelihood_ = -np.sum(emp_cov * precision) + fast_logdet(precision)
55
+ log_likelihood_ -= p * np.log(2 * np.pi)
56
+ log_likelihood_ /= 2.0
57
+ return log_likelihood_
58
+
59
+
60
+ @validate_params(
61
+ {
62
+ "X": ["array-like"],
63
+ "assume_centered": ["boolean"],
64
+ },
65
+ prefer_skip_nested_validation=True,
66
+ )
67
+ def empirical_covariance(X, *, assume_centered=False):
68
+ """Compute the Maximum likelihood covariance estimator.
69
+
70
+ Parameters
71
+ ----------
72
+ X : ndarray of shape (n_samples, n_features)
73
+ Data from which to compute the covariance estimate.
74
+
75
+ assume_centered : bool, default=False
76
+ If `True`, data will not be centered before computation.
77
+ Useful when working with data whose mean is almost, but not exactly
78
+ zero.
79
+ If `False`, data will be centered before computation.
80
+
81
+ Returns
82
+ -------
83
+ covariance : ndarray of shape (n_features, n_features)
84
+ Empirical covariance (Maximum Likelihood Estimator).
85
+
86
+ Examples
87
+ --------
88
+ >>> from sklearn.covariance import empirical_covariance
89
+ >>> X = [[1,1,1],[1,1,1],[1,1,1],
90
+ ... [0,0,0],[0,0,0],[0,0,0]]
91
+ >>> empirical_covariance(X)
92
+ array([[0.25, 0.25, 0.25],
93
+ [0.25, 0.25, 0.25],
94
+ [0.25, 0.25, 0.25]])
95
+ """
96
+ X = check_array(X, ensure_2d=False, force_all_finite=False)
97
+
98
+ if X.ndim == 1:
99
+ X = np.reshape(X, (1, -1))
100
+
101
+ if X.shape[0] == 1:
102
+ warnings.warn(
103
+ "Only one sample available. You may want to reshape your data array"
104
+ )
105
+
106
+ if assume_centered:
107
+ covariance = np.dot(X.T, X) / X.shape[0]
108
+ else:
109
+ covariance = np.cov(X.T, bias=1)
110
+
111
+ if covariance.ndim == 0:
112
+ covariance = np.array([[covariance]])
113
+ return covariance
114
+
115
+
116
+ class EmpiricalCovariance(BaseEstimator):
117
+ """Maximum likelihood covariance estimator.
118
+
119
+ Read more in the :ref:`User Guide <covariance>`.
120
+
121
+ Parameters
122
+ ----------
123
+ store_precision : bool, default=True
124
+ Specifies if the estimated precision is stored.
125
+
126
+ assume_centered : bool, default=False
127
+ If True, data are not centered before computation.
128
+ Useful when working with data whose mean is almost, but not exactly
129
+ zero.
130
+ If False (default), data are centered before computation.
131
+
132
+ Attributes
133
+ ----------
134
+ location_ : ndarray of shape (n_features,)
135
+ Estimated location, i.e. the estimated mean.
136
+
137
+ covariance_ : ndarray of shape (n_features, n_features)
138
+ Estimated covariance matrix
139
+
140
+ precision_ : ndarray of shape (n_features, n_features)
141
+ Estimated pseudo-inverse matrix.
142
+ (stored only if store_precision is True)
143
+
144
+ n_features_in_ : int
145
+ Number of features seen during :term:`fit`.
146
+
147
+ .. versionadded:: 0.24
148
+
149
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
150
+ Names of features seen during :term:`fit`. Defined only when `X`
151
+ has feature names that are all strings.
152
+
153
+ .. versionadded:: 1.0
154
+
155
+ See Also
156
+ --------
157
+ EllipticEnvelope : An object for detecting outliers in
158
+ a Gaussian distributed dataset.
159
+ GraphicalLasso : Sparse inverse covariance estimation
160
+ with an l1-penalized estimator.
161
+ LedoitWolf : LedoitWolf Estimator.
162
+ MinCovDet : Minimum Covariance Determinant
163
+ (robust estimator of covariance).
164
+ OAS : Oracle Approximating Shrinkage Estimator.
165
+ ShrunkCovariance : Covariance estimator with shrinkage.
166
+
167
+ Examples
168
+ --------
169
+ >>> import numpy as np
170
+ >>> from sklearn.covariance import EmpiricalCovariance
171
+ >>> from sklearn.datasets import make_gaussian_quantiles
172
+ >>> real_cov = np.array([[.8, .3],
173
+ ... [.3, .4]])
174
+ >>> rng = np.random.RandomState(0)
175
+ >>> X = rng.multivariate_normal(mean=[0, 0],
176
+ ... cov=real_cov,
177
+ ... size=500)
178
+ >>> cov = EmpiricalCovariance().fit(X)
179
+ >>> cov.covariance_
180
+ array([[0.7569..., 0.2818...],
181
+ [0.2818..., 0.3928...]])
182
+ >>> cov.location_
183
+ array([0.0622..., 0.0193...])
184
+ """
185
+
186
+ _parameter_constraints: dict = {
187
+ "store_precision": ["boolean"],
188
+ "assume_centered": ["boolean"],
189
+ }
190
+
191
+ def __init__(self, *, store_precision=True, assume_centered=False):
192
+ self.store_precision = store_precision
193
+ self.assume_centered = assume_centered
194
+
195
+ def _set_covariance(self, covariance):
196
+ """Saves the covariance and precision estimates
197
+
198
+ Storage is done accordingly to `self.store_precision`.
199
+ Precision stored only if invertible.
200
+
201
+ Parameters
202
+ ----------
203
+ covariance : array-like of shape (n_features, n_features)
204
+ Estimated covariance matrix to be stored, and from which precision
205
+ is computed.
206
+ """
207
+ covariance = check_array(covariance)
208
+ # set covariance
209
+ self.covariance_ = covariance
210
+ # set precision
211
+ if self.store_precision:
212
+ self.precision_ = linalg.pinvh(covariance, check_finite=False)
213
+ else:
214
+ self.precision_ = None
215
+
216
+ def get_precision(self):
217
+ """Getter for the precision matrix.
218
+
219
+ Returns
220
+ -------
221
+ precision_ : array-like of shape (n_features, n_features)
222
+ The precision matrix associated to the current covariance object.
223
+ """
224
+ if self.store_precision:
225
+ precision = self.precision_
226
+ else:
227
+ precision = linalg.pinvh(self.covariance_, check_finite=False)
228
+ return precision
229
+
230
+ @_fit_context(prefer_skip_nested_validation=True)
231
+ def fit(self, X, y=None):
232
+ """Fit the maximum likelihood covariance estimator to X.
233
+
234
+ Parameters
235
+ ----------
236
+ X : array-like of shape (n_samples, n_features)
237
+ Training data, where `n_samples` is the number of samples and
238
+ `n_features` is the number of features.
239
+
240
+ y : Ignored
241
+ Not used, present for API consistency by convention.
242
+
243
+ Returns
244
+ -------
245
+ self : object
246
+ Returns the instance itself.
247
+ """
248
+ X = self._validate_data(X)
249
+ if self.assume_centered:
250
+ self.location_ = np.zeros(X.shape[1])
251
+ else:
252
+ self.location_ = X.mean(0)
253
+ covariance = empirical_covariance(X, assume_centered=self.assume_centered)
254
+ self._set_covariance(covariance)
255
+
256
+ return self
257
+
258
+ def score(self, X_test, y=None):
259
+ """Compute the log-likelihood of `X_test` under the estimated Gaussian model.
260
+
261
+ The Gaussian model is defined by its mean and covariance matrix which are
262
+ represented respectively by `self.location_` and `self.covariance_`.
263
+
264
+ Parameters
265
+ ----------
266
+ X_test : array-like of shape (n_samples, n_features)
267
+ Test data of which we compute the likelihood, where `n_samples` is
268
+ the number of samples and `n_features` is the number of features.
269
+ `X_test` is assumed to be drawn from the same distribution than
270
+ the data used in fit (including centering).
271
+
272
+ y : Ignored
273
+ Not used, present for API consistency by convention.
274
+
275
+ Returns
276
+ -------
277
+ res : float
278
+ The log-likelihood of `X_test` with `self.location_` and `self.covariance_`
279
+ as estimators of the Gaussian model mean and covariance matrix respectively.
280
+ """
281
+ X_test = self._validate_data(X_test, reset=False)
282
+ # compute empirical covariance of the test set
283
+ test_cov = empirical_covariance(X_test - self.location_, assume_centered=True)
284
+ # compute log likelihood
285
+ res = log_likelihood(test_cov, self.get_precision())
286
+
287
+ return res
288
+
289
+ def error_norm(self, comp_cov, norm="frobenius", scaling=True, squared=True):
290
+ """Compute the Mean Squared Error between two covariance estimators.
291
+
292
+ Parameters
293
+ ----------
294
+ comp_cov : array-like of shape (n_features, n_features)
295
+ The covariance to compare with.
296
+
297
+ norm : {"frobenius", "spectral"}, default="frobenius"
298
+ The type of norm used to compute the error. Available error types:
299
+ - 'frobenius' (default): sqrt(tr(A^t.A))
300
+ - 'spectral': sqrt(max(eigenvalues(A^t.A))
301
+ where A is the error ``(comp_cov - self.covariance_)``.
302
+
303
+ scaling : bool, default=True
304
+ If True (default), the squared error norm is divided by n_features.
305
+ If False, the squared error norm is not rescaled.
306
+
307
+ squared : bool, default=True
308
+ Whether to compute the squared error norm or the error norm.
309
+ If True (default), the squared error norm is returned.
310
+ If False, the error norm is returned.
311
+
312
+ Returns
313
+ -------
314
+ result : float
315
+ The Mean Squared Error (in the sense of the Frobenius norm) between
316
+ `self` and `comp_cov` covariance estimators.
317
+ """
318
+ # compute the error
319
+ error = comp_cov - self.covariance_
320
+ # compute the error norm
321
+ if norm == "frobenius":
322
+ squared_norm = np.sum(error**2)
323
+ elif norm == "spectral":
324
+ squared_norm = np.amax(linalg.svdvals(np.dot(error.T, error)))
325
+ else:
326
+ raise NotImplementedError(
327
+ "Only spectral and frobenius norms are implemented"
328
+ )
329
+ # optionally scale the error norm
330
+ if scaling:
331
+ squared_norm = squared_norm / error.shape[0]
332
+ # finally get either the squared norm or the norm
333
+ if squared:
334
+ result = squared_norm
335
+ else:
336
+ result = np.sqrt(squared_norm)
337
+
338
+ return result
339
+
340
+ def mahalanobis(self, X):
341
+ """Compute the squared Mahalanobis distances of given observations.
342
+
343
+ Parameters
344
+ ----------
345
+ X : array-like of shape (n_samples, n_features)
346
+ The observations, the Mahalanobis distances of the which we
347
+ compute. Observations are assumed to be drawn from the same
348
+ distribution than the data used in fit.
349
+
350
+ Returns
351
+ -------
352
+ dist : ndarray of shape (n_samples,)
353
+ Squared Mahalanobis distances of the observations.
354
+ """
355
+ X = self._validate_data(X, reset=False)
356
+
357
+ precision = self.get_precision()
358
+ with config_context(assume_finite=True):
359
+ # compute mahalanobis distances
360
+ dist = pairwise_distances(
361
+ X, self.location_[np.newaxis, :], metric="mahalanobis", VI=precision
362
+ )
363
+
364
+ return np.reshape(dist, (len(X),)) ** 2
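The module added above exposes both the empirical_covariance function and the EmpiricalCovariance estimator. A minimal usage sketch follows, based only on the docstrings visible in the diff; the random data and printed values are illustrative, not part of the commit.

# Illustrative use of the estimator added in _empirical_covariance.py above;
# the toy data is made up for the example.
import numpy as np
from sklearn.covariance import EmpiricalCovariance, empirical_covariance

rng = np.random.RandomState(0)
X = rng.multivariate_normal(mean=[0, 0], cov=[[0.8, 0.3], [0.3, 0.4]], size=500)

cov = EmpiricalCovariance().fit(X)
print(cov.covariance_)         # MLE of the covariance matrix
print(cov.location_)           # estimated mean
print(cov.score(X))            # mean log-likelihood of X under the fitted model
print(cov.mahalanobis(X[:5]))  # squared Mahalanobis distances of observations

# The function form returns the same MLE without the estimator wrapper.
print(empirical_covariance(X, assume_centered=False))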
llmeval-env/lib/python3.10/site-packages/sklearn/covariance/tests/__init__.py ADDED
File without changes
llmeval-env/lib/python3.10/site-packages/sklearn/covariance/tests/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (197 Bytes).
 
llmeval-env/lib/python3.10/site-packages/sklearn/covariance/tests/__pycache__/test_covariance.cpython-310.pyc ADDED
Binary file (7.79 kB).
 
llmeval-env/lib/python3.10/site-packages/sklearn/covariance/tests/__pycache__/test_elliptic_envelope.cpython-310.pyc ADDED
Binary file (1.68 kB).
 
llmeval-env/lib/python3.10/site-packages/sklearn/covariance/tests/__pycache__/test_graphical_lasso.cpython-310.pyc ADDED
Binary file (8.31 kB).
 
llmeval-env/lib/python3.10/site-packages/sklearn/covariance/tests/__pycache__/test_robust_covariance.cpython-310.pyc ADDED
Binary file (4.46 kB).
 
llmeval-env/lib/python3.10/site-packages/sklearn/covariance/tests/test_covariance.py ADDED
@@ -0,0 +1,377 @@
1
+ # Author: Alexandre Gramfort <[email protected]>
2
+ # Gael Varoquaux <[email protected]>
3
+ # Virgile Fritsch <[email protected]>
4
+ #
5
+ # License: BSD 3 clause
6
+
7
+ import numpy as np
8
+ import pytest
9
+
10
+ from sklearn import datasets
11
+ from sklearn.covariance import (
12
+ OAS,
13
+ EmpiricalCovariance,
14
+ LedoitWolf,
15
+ ShrunkCovariance,
16
+ empirical_covariance,
17
+ ledoit_wolf,
18
+ ledoit_wolf_shrinkage,
19
+ oas,
20
+ shrunk_covariance,
21
+ )
22
+ from sklearn.covariance._shrunk_covariance import _ledoit_wolf
23
+ from sklearn.utils._testing import (
24
+ assert_allclose,
25
+ assert_almost_equal,
26
+ assert_array_almost_equal,
27
+ assert_array_equal,
28
+ )
29
+
30
+ from .._shrunk_covariance import _oas
31
+
32
+ X, _ = datasets.load_diabetes(return_X_y=True)
33
+ X_1d = X[:, 0]
34
+ n_samples, n_features = X.shape
35
+
36
+
37
+ def test_covariance():
38
+ # Tests Covariance module on a simple dataset.
39
+ # test covariance fit from data
40
+ cov = EmpiricalCovariance()
41
+ cov.fit(X)
42
+ emp_cov = empirical_covariance(X)
43
+ assert_array_almost_equal(emp_cov, cov.covariance_, 4)
44
+ assert_almost_equal(cov.error_norm(emp_cov), 0)
45
+ assert_almost_equal(cov.error_norm(emp_cov, norm="spectral"), 0)
46
+ assert_almost_equal(cov.error_norm(emp_cov, norm="frobenius"), 0)
47
+ assert_almost_equal(cov.error_norm(emp_cov, scaling=False), 0)
48
+ assert_almost_equal(cov.error_norm(emp_cov, squared=False), 0)
49
+ with pytest.raises(NotImplementedError):
50
+ cov.error_norm(emp_cov, norm="foo")
51
+ # Mahalanobis distances computation test
52
+ mahal_dist = cov.mahalanobis(X)
53
+ assert np.amin(mahal_dist) > 0
54
+
55
+ # test with n_features = 1
56
+ X_1d = X[:, 0].reshape((-1, 1))
57
+ cov = EmpiricalCovariance()
58
+ cov.fit(X_1d)
59
+ assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
60
+ assert_almost_equal(cov.error_norm(empirical_covariance(X_1d)), 0)
61
+ assert_almost_equal(cov.error_norm(empirical_covariance(X_1d), norm="spectral"), 0)
62
+
63
+ # test with one sample
64
+ # Create X with 1 sample and 5 features
65
+ X_1sample = np.arange(5).reshape(1, 5)
66
+ cov = EmpiricalCovariance()
67
+ warn_msg = "Only one sample available. You may want to reshape your data array"
68
+ with pytest.warns(UserWarning, match=warn_msg):
69
+ cov.fit(X_1sample)
70
+
71
+ assert_array_almost_equal(cov.covariance_, np.zeros(shape=(5, 5), dtype=np.float64))
72
+
73
+ # test integer type
74
+ X_integer = np.asarray([[0, 1], [1, 0]])
75
+ result = np.asarray([[0.25, -0.25], [-0.25, 0.25]])
76
+ assert_array_almost_equal(empirical_covariance(X_integer), result)
77
+
78
+ # test centered case
79
+ cov = EmpiricalCovariance(assume_centered=True)
80
+ cov.fit(X)
81
+ assert_array_equal(cov.location_, np.zeros(X.shape[1]))
82
+
83
+
84
+ @pytest.mark.parametrize("n_matrices", [1, 3])
85
+ def test_shrunk_covariance_func(n_matrices):
86
+ """Check `shrunk_covariance` function."""
87
+
88
+ n_features = 2
89
+ cov = np.ones((n_features, n_features))
90
+ cov_target = np.array([[1, 0.5], [0.5, 1]])
91
+
92
+ if n_matrices > 1:
93
+ cov = np.repeat(cov[np.newaxis, ...], n_matrices, axis=0)
94
+ cov_target = np.repeat(cov_target[np.newaxis, ...], n_matrices, axis=0)
95
+
96
+ cov_shrunk = shrunk_covariance(cov, 0.5)
97
+ assert_allclose(cov_shrunk, cov_target)
98
+
99
+
100
+ def test_shrunk_covariance():
101
+ """Check consistency between `ShrunkCovariance` and `shrunk_covariance`."""
102
+
103
+ # Tests ShrunkCovariance module on a simple dataset.
104
+ # compare shrunk covariance obtained from data and from MLE estimate
105
+ cov = ShrunkCovariance(shrinkage=0.5)
106
+ cov.fit(X)
107
+ assert_array_almost_equal(
108
+ shrunk_covariance(empirical_covariance(X), shrinkage=0.5), cov.covariance_, 4
109
+ )
110
+
111
+ # same test with shrinkage not provided
112
+ cov = ShrunkCovariance()
113
+ cov.fit(X)
114
+ assert_array_almost_equal(
115
+ shrunk_covariance(empirical_covariance(X)), cov.covariance_, 4
116
+ )
117
+
118
+ # same test with shrinkage = 0 (<==> empirical_covariance)
119
+ cov = ShrunkCovariance(shrinkage=0.0)
120
+ cov.fit(X)
121
+ assert_array_almost_equal(empirical_covariance(X), cov.covariance_, 4)
122
+
123
+ # test with n_features = 1
124
+ X_1d = X[:, 0].reshape((-1, 1))
125
+ cov = ShrunkCovariance(shrinkage=0.3)
126
+ cov.fit(X_1d)
127
+ assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
128
+
129
+ # test shrinkage coeff on a simple data set (without saving precision)
130
+ cov = ShrunkCovariance(shrinkage=0.5, store_precision=False)
131
+ cov.fit(X)
132
+ assert cov.precision_ is None
133
+
134
+
135
+ def test_ledoit_wolf():
136
+ # Tests LedoitWolf module on a simple dataset.
137
+ # test shrinkage coeff on a simple data set
138
+ X_centered = X - X.mean(axis=0)
139
+ lw = LedoitWolf(assume_centered=True)
140
+ lw.fit(X_centered)
141
+ shrinkage_ = lw.shrinkage_
142
+
143
+ score_ = lw.score(X_centered)
144
+ assert_almost_equal(
145
+ ledoit_wolf_shrinkage(X_centered, assume_centered=True), shrinkage_
146
+ )
147
+ assert_almost_equal(
148
+ ledoit_wolf_shrinkage(X_centered, assume_centered=True, block_size=6),
149
+ shrinkage_,
150
+ )
151
+ # compare shrunk covariance obtained from data and from MLE estimate
152
+ lw_cov_from_mle, lw_shrinkage_from_mle = ledoit_wolf(
153
+ X_centered, assume_centered=True
154
+ )
155
+ assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
156
+ assert_almost_equal(lw_shrinkage_from_mle, lw.shrinkage_)
157
+ # compare estimates given by LW and ShrunkCovariance
158
+ scov = ShrunkCovariance(shrinkage=lw.shrinkage_, assume_centered=True)
159
+ scov.fit(X_centered)
160
+ assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)
161
+
162
+ # test with n_features = 1
163
+ X_1d = X[:, 0].reshape((-1, 1))
164
+ lw = LedoitWolf(assume_centered=True)
165
+ lw.fit(X_1d)
166
+ lw_cov_from_mle, lw_shrinkage_from_mle = ledoit_wolf(X_1d, assume_centered=True)
167
+ assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
168
+ assert_almost_equal(lw_shrinkage_from_mle, lw.shrinkage_)
169
+ assert_array_almost_equal((X_1d**2).sum() / n_samples, lw.covariance_, 4)
170
+
171
+ # test shrinkage coeff on a simple data set (without saving precision)
172
+ lw = LedoitWolf(store_precision=False, assume_centered=True)
173
+ lw.fit(X_centered)
174
+ assert_almost_equal(lw.score(X_centered), score_, 4)
175
+ assert lw.precision_ is None
176
+
177
+ # Same tests without assuming centered data
178
+ # test shrinkage coeff on a simple data set
179
+ lw = LedoitWolf()
180
+ lw.fit(X)
181
+ assert_almost_equal(lw.shrinkage_, shrinkage_, 4)
182
+ assert_almost_equal(lw.shrinkage_, ledoit_wolf_shrinkage(X))
183
+ assert_almost_equal(lw.shrinkage_, ledoit_wolf(X)[1])
184
+ assert_almost_equal(
185
+ lw.shrinkage_, _ledoit_wolf(X=X, assume_centered=False, block_size=10000)[1]
186
+ )
187
+ assert_almost_equal(lw.score(X), score_, 4)
188
+ # compare shrunk covariance obtained from data and from MLE estimate
189
+ lw_cov_from_mle, lw_shrinkage_from_mle = ledoit_wolf(X)
190
+ assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
191
+ assert_almost_equal(lw_shrinkage_from_mle, lw.shrinkage_)
192
+ # compare estimates given by LW and ShrunkCovariance
193
+ scov = ShrunkCovariance(shrinkage=lw.shrinkage_)
194
+ scov.fit(X)
195
+ assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)
196
+
197
+ # test with n_features = 1
198
+ X_1d = X[:, 0].reshape((-1, 1))
199
+ lw = LedoitWolf()
200
+ lw.fit(X_1d)
201
+ assert_allclose(
202
+ X_1d.var(ddof=0),
203
+ _ledoit_wolf(X=X_1d, assume_centered=False, block_size=10000)[0],
204
+ )
205
+ lw_cov_from_mle, lw_shrinkage_from_mle = ledoit_wolf(X_1d)
206
+ assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
207
+ assert_almost_equal(lw_shrinkage_from_mle, lw.shrinkage_)
208
+ assert_array_almost_equal(empirical_covariance(X_1d), lw.covariance_, 4)
209
+
210
+ # test with one sample
211
+ # warning should be raised when using only 1 sample
212
+ X_1sample = np.arange(5).reshape(1, 5)
213
+ lw = LedoitWolf()
214
+
215
+ warn_msg = "Only one sample available. You may want to reshape your data array"
216
+ with pytest.warns(UserWarning, match=warn_msg):
217
+ lw.fit(X_1sample)
218
+
219
+ assert_array_almost_equal(lw.covariance_, np.zeros(shape=(5, 5), dtype=np.float64))
220
+
221
+ # test shrinkage coeff on a simple data set (without saving precision)
222
+ lw = LedoitWolf(store_precision=False)
223
+ lw.fit(X)
224
+ assert_almost_equal(lw.score(X), score_, 4)
225
+ assert lw.precision_ is None
226
+
227
+
228
+ def _naive_ledoit_wolf_shrinkage(X):
229
+ # A simple implementation of the formulas from Ledoit & Wolf
230
+
231
+ # The computation below achieves the following computations of the
232
+ # "O. Ledoit and M. Wolf, A Well-Conditioned Estimator for
233
+ # Large-Dimensional Covariance Matrices"
234
+ # beta and delta are given in the beginning of section 3.2
235
+ n_samples, n_features = X.shape
236
+ emp_cov = empirical_covariance(X, assume_centered=False)
237
+ mu = np.trace(emp_cov) / n_features
238
+ delta_ = emp_cov.copy()
239
+ delta_.flat[:: n_features + 1] -= mu
240
+ delta = (delta_**2).sum() / n_features
241
+ X2 = X**2
242
+ beta_ = (
243
+ 1.0
244
+ / (n_features * n_samples)
245
+ * np.sum(np.dot(X2.T, X2) / n_samples - emp_cov**2)
246
+ )
247
+
248
+ beta = min(beta_, delta)
249
+ shrinkage = beta / delta
250
+ return shrinkage
251
+
252
+
253
+ def test_ledoit_wolf_small():
254
+ # Compare our blocked implementation to the naive implementation
255
+ X_small = X[:, :4]
256
+ lw = LedoitWolf()
257
+ lw.fit(X_small)
258
+ shrinkage_ = lw.shrinkage_
259
+
260
+ assert_almost_equal(shrinkage_, _naive_ledoit_wolf_shrinkage(X_small))
261
+
262
+
263
+ def test_ledoit_wolf_large():
264
+ # test that ledoit_wolf doesn't error on data that is wider than block_size
265
+ rng = np.random.RandomState(0)
266
+ # use a number of features that is larger than the block-size
267
+ X = rng.normal(size=(10, 20))
268
+ lw = LedoitWolf(block_size=10).fit(X)
269
+ # check that covariance is about diagonal (random normal noise)
270
+ assert_almost_equal(lw.covariance_, np.eye(20), 0)
271
+ cov = lw.covariance_
272
+
273
+ # check that the result is consistent with not splitting data into blocks.
274
+ lw = LedoitWolf(block_size=25).fit(X)
275
+ assert_almost_equal(lw.covariance_, cov)
276
+
277
+
278
+ @pytest.mark.parametrize(
279
+ "ledoit_wolf_fitting_function", [LedoitWolf().fit, ledoit_wolf_shrinkage]
280
+ )
281
+ def test_ledoit_wolf_empty_array(ledoit_wolf_fitting_function):
282
+ """Check that we validate X and raise proper error with 0-sample array."""
283
+ X_empty = np.zeros((0, 2))
284
+ with pytest.raises(ValueError, match="Found array with 0 sample"):
285
+ ledoit_wolf_fitting_function(X_empty)
286
+
287
+
288
+ def test_oas():
289
+ # Tests OAS module on a simple dataset.
290
+ # test shrinkage coeff on a simple data set
291
+ X_centered = X - X.mean(axis=0)
292
+ oa = OAS(assume_centered=True)
293
+ oa.fit(X_centered)
294
+ shrinkage_ = oa.shrinkage_
295
+ score_ = oa.score(X_centered)
296
+ # compare shrunk covariance obtained from data and from MLE estimate
297
+ oa_cov_from_mle, oa_shrinkage_from_mle = oas(X_centered, assume_centered=True)
298
+ assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
299
+ assert_almost_equal(oa_shrinkage_from_mle, oa.shrinkage_)
300
+ # compare estimates given by OAS and ShrunkCovariance
301
+ scov = ShrunkCovariance(shrinkage=oa.shrinkage_, assume_centered=True)
302
+ scov.fit(X_centered)
303
+ assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
304
+
305
+ # test with n_features = 1
306
+ X_1d = X[:, 0:1]
307
+ oa = OAS(assume_centered=True)
308
+ oa.fit(X_1d)
309
+ oa_cov_from_mle, oa_shrinkage_from_mle = oas(X_1d, assume_centered=True)
310
+ assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
311
+ assert_almost_equal(oa_shrinkage_from_mle, oa.shrinkage_)
312
+ assert_array_almost_equal((X_1d**2).sum() / n_samples, oa.covariance_, 4)
313
+
314
+ # test shrinkage coeff on a simple data set (without saving precision)
315
+ oa = OAS(store_precision=False, assume_centered=True)
316
+ oa.fit(X_centered)
317
+ assert_almost_equal(oa.score(X_centered), score_, 4)
318
+ assert oa.precision_ is None
319
+
320
+ # Same tests without assuming centered data--------------------------------
321
+ # test shrinkage coeff on a simple data set
322
+ oa = OAS()
323
+ oa.fit(X)
324
+ assert_almost_equal(oa.shrinkage_, shrinkage_, 4)
325
+ assert_almost_equal(oa.score(X), score_, 4)
326
+ # compare shrunk covariance obtained from data and from MLE estimate
327
+ oa_cov_from_mle, oa_shrinkage_from_mle = oas(X)
328
+ assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
329
+ assert_almost_equal(oa_shrinkage_from_mle, oa.shrinkage_)
330
+ # compare estimates given by OAS and ShrunkCovariance
331
+ scov = ShrunkCovariance(shrinkage=oa.shrinkage_)
332
+ scov.fit(X)
333
+ assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
334
+
335
+ # test with n_features = 1
336
+ X_1d = X[:, 0].reshape((-1, 1))
337
+ oa = OAS()
338
+ oa.fit(X_1d)
339
+ oa_cov_from_mle, oa_shrinkage_from_mle = oas(X_1d)
340
+ assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
341
+ assert_almost_equal(oa_shrinkage_from_mle, oa.shrinkage_)
342
+ assert_array_almost_equal(empirical_covariance(X_1d), oa.covariance_, 4)
343
+
344
+ # test with one sample
345
+ # warning should be raised when using only 1 sample
346
+ X_1sample = np.arange(5).reshape(1, 5)
347
+ oa = OAS()
348
+ warn_msg = "Only one sample available. You may want to reshape your data array"
349
+ with pytest.warns(UserWarning, match=warn_msg):
350
+ oa.fit(X_1sample)
351
+
352
+ assert_array_almost_equal(oa.covariance_, np.zeros(shape=(5, 5), dtype=np.float64))
353
+
354
+ # test shrinkage coeff on a simple data set (without saving precision)
355
+ oa = OAS(store_precision=False)
356
+ oa.fit(X)
357
+ assert_almost_equal(oa.score(X), score_, 4)
358
+ assert oa.precision_ is None
359
+
360
+ # test function _oas without assuming centered data
361
+ X_1f = X[:, 0:1]
362
+ oa = OAS()
363
+ oa.fit(X_1f)
364
+ # compare shrunk covariance obtained from data and from MLE estimate
365
+ _oa_cov_from_mle, _oa_shrinkage_from_mle = _oas(X_1f)
366
+ assert_array_almost_equal(_oa_cov_from_mle, oa.covariance_, 4)
367
+ assert_almost_equal(_oa_shrinkage_from_mle, oa.shrinkage_)
368
+ assert_array_almost_equal((X_1f**2).sum() / n_samples, oa.covariance_, 4)
369
+
370
+
371
+ def test_EmpiricalCovariance_validates_mahalanobis():
372
+ """Checks that EmpiricalCovariance validates data with mahalanobis."""
373
+ cov = EmpiricalCovariance().fit(X)
374
+
375
+ msg = f"X has 2 features, but \\w+ is expecting {X.shape[1]} features as input"
376
+ with pytest.raises(ValueError, match=msg):
377
+ cov.mahalanobis(X[:, :2])
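The cov_target expected by test_shrunk_covariance_func above follows from the usual convex-combination definition of shrinkage. A small numpy check of that arithmetic is given below, assuming the standard formula (1 - shrinkage) * cov + shrinkage * mu * I with mu = trace(cov) / n_features; the hand computation is an illustration, not part of the test file.

# Worked check of the expectation in test_shrunk_covariance_func above,
# assuming the standard shrinkage formula (1 - s) * cov + s * mu * identity.
import numpy as np
from sklearn.covariance import shrunk_covariance

cov = np.ones((2, 2))
s = 0.5
mu = np.trace(cov) / cov.shape[0]           # trace(cov) / n_features = 1.0
by_hand = (1 - s) * cov + s * mu * np.eye(2)

print(by_hand)                              # [[1.0, 0.5], [0.5, 1.0]]
print(shrunk_covariance(cov, shrinkage=s))  # matches cov_target in the test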
llmeval-env/lib/python3.10/site-packages/sklearn/covariance/tests/test_elliptic_envelope.py ADDED
@@ -0,0 +1,52 @@
1
+ """
2
+ Testing for Elliptic Envelope algorithm (sklearn.covariance.elliptic_envelope).
3
+ """
4
+
5
+ import numpy as np
6
+ import pytest
7
+
8
+ from sklearn.covariance import EllipticEnvelope
9
+ from sklearn.exceptions import NotFittedError
10
+ from sklearn.utils._testing import (
11
+ assert_almost_equal,
12
+ assert_array_almost_equal,
13
+ assert_array_equal,
14
+ )
15
+
16
+
17
+ def test_elliptic_envelope(global_random_seed):
18
+ rnd = np.random.RandomState(global_random_seed)
19
+ X = rnd.randn(100, 10)
20
+ clf = EllipticEnvelope(contamination=0.1)
21
+ with pytest.raises(NotFittedError):
22
+ clf.predict(X)
23
+ with pytest.raises(NotFittedError):
24
+ clf.decision_function(X)
25
+ clf.fit(X)
26
+ y_pred = clf.predict(X)
27
+ scores = clf.score_samples(X)
28
+ decisions = clf.decision_function(X)
29
+
30
+ assert_array_almost_equal(scores, -clf.mahalanobis(X))
31
+ assert_array_almost_equal(clf.mahalanobis(X), clf.dist_)
32
+ assert_almost_equal(
33
+ clf.score(X, np.ones(100)), (100 - y_pred[y_pred == -1].size) / 100.0
34
+ )
35
+ assert sum(y_pred == -1) == sum(decisions < 0)
36
+
37
+
38
+ def test_score_samples():
39
+ X_train = [[1, 1], [1, 2], [2, 1]]
40
+ clf1 = EllipticEnvelope(contamination=0.2).fit(X_train)
41
+ clf2 = EllipticEnvelope().fit(X_train)
42
+ assert_array_equal(
43
+ clf1.score_samples([[2.0, 2.0]]),
44
+ clf1.decision_function([[2.0, 2.0]]) + clf1.offset_,
45
+ )
46
+ assert_array_equal(
47
+ clf2.score_samples([[2.0, 2.0]]),
48
+ clf2.decision_function([[2.0, 2.0]]) + clf2.offset_,
49
+ )
50
+ assert_array_equal(
51
+ clf1.score_samples([[2.0, 2.0]]), clf2.score_samples([[2.0, 2.0]])
52
+ )
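As a companion to the test above, a minimal outlier-detection sketch with EllipticEnvelope follows; the synthetic data and the contamination value are illustrative and not taken from the test suite.

# Illustrative outlier detection with EllipticEnvelope, mirroring the API
# exercised in test_elliptic_envelope above; the data here is made up.
import numpy as np
from sklearn.covariance import EllipticEnvelope

rng = np.random.RandomState(42)
X_inliers = rng.randn(95, 2)
X_outliers = rng.uniform(low=-6, high=6, size=(5, 2))
X = np.vstack([X_inliers, X_outliers])

clf = EllipticEnvelope(contamination=0.05).fit(X)
pred = clf.predict(X)               # +1 for inliers, -1 for outliers
scores = clf.decision_function(X)   # negative values correspond to outliers
print((pred == -1).sum(), "points flagged as outliers")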
llmeval-env/lib/python3.10/site-packages/sklearn/covariance/tests/test_graphical_lasso.py ADDED
@@ -0,0 +1,286 @@
1
+ """ Test the graphical_lasso module.
2
+ """
3
+ import sys
4
+ from io import StringIO
5
+
6
+ import numpy as np
7
+ import pytest
8
+ from numpy.testing import assert_allclose
9
+ from scipy import linalg
10
+
11
+ from sklearn import datasets
12
+ from sklearn.covariance import (
13
+ GraphicalLasso,
14
+ GraphicalLassoCV,
15
+ empirical_covariance,
16
+ graphical_lasso,
17
+ )
18
+ from sklearn.datasets import make_sparse_spd_matrix
19
+ from sklearn.utils import check_random_state
20
+ from sklearn.utils._testing import (
21
+ _convert_container,
22
+ assert_array_almost_equal,
23
+ assert_array_less,
24
+ )
25
+
26
+
27
+ def test_graphical_lassos(random_state=1):
28
+ """Test the graphical lasso solvers.
29
+
30
+ This checks is unstable for some random seeds where the covariance found with "cd"
31
+ and "lars" solvers are different (4 cases / 100 tries).
32
+ """
33
+ # Sample data from a sparse multivariate normal
34
+ dim = 20
35
+ n_samples = 100
36
+ random_state = check_random_state(random_state)
37
+ prec = make_sparse_spd_matrix(dim, alpha=0.95, random_state=random_state)
38
+ cov = linalg.inv(prec)
39
+ X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
40
+ emp_cov = empirical_covariance(X)
41
+
42
+ for alpha in (0.0, 0.1, 0.25):
43
+ covs = dict()
44
+ icovs = dict()
45
+ for method in ("cd", "lars"):
46
+ cov_, icov_, costs = graphical_lasso(
47
+ emp_cov, return_costs=True, alpha=alpha, mode=method
48
+ )
49
+ covs[method] = cov_
50
+ icovs[method] = icov_
51
+ costs, dual_gap = np.array(costs).T
52
+ # Check that the costs always decrease (doesn't hold if alpha == 0)
53
+ if not alpha == 0:
54
+ # use 1e-12 since the cost can be exactly 0
55
+ assert_array_less(np.diff(costs), 1e-12)
56
+ # Check that the 2 approaches give similar results
57
+ assert_allclose(covs["cd"], covs["lars"], atol=5e-4)
58
+ assert_allclose(icovs["cd"], icovs["lars"], atol=5e-4)
59
+
60
+ # Smoke test the estimator
61
+ model = GraphicalLasso(alpha=0.25).fit(X)
62
+ model.score(X)
63
+ assert_array_almost_equal(model.covariance_, covs["cd"], decimal=4)
64
+ assert_array_almost_equal(model.covariance_, covs["lars"], decimal=4)
65
+
66
+ # For a centered matrix, assume_centered could be chosen True or False
67
+ # Check that this returns indeed the same result for centered data
68
+ Z = X - X.mean(0)
69
+ precs = list()
70
+ for assume_centered in (False, True):
71
+ prec_ = GraphicalLasso(assume_centered=assume_centered).fit(Z).precision_
72
+ precs.append(prec_)
73
+ assert_array_almost_equal(precs[0], precs[1])
74
+
75
+
76
+ def test_graphical_lasso_when_alpha_equals_0():
77
+ """Test graphical_lasso's early return condition when alpha=0."""
78
+ X = np.random.randn(100, 10)
79
+ emp_cov = empirical_covariance(X, assume_centered=True)
80
+
81
+ model = GraphicalLasso(alpha=0, covariance="precomputed").fit(emp_cov)
82
+ assert_allclose(model.precision_, np.linalg.inv(emp_cov))
83
+
84
+ _, precision = graphical_lasso(emp_cov, alpha=0)
85
+ assert_allclose(precision, np.linalg.inv(emp_cov))
86
+
87
+
88
+ @pytest.mark.parametrize("mode", ["cd", "lars"])
89
+ def test_graphical_lasso_n_iter(mode):
90
+ X, _ = datasets.make_classification(n_samples=5_000, n_features=20, random_state=0)
91
+ emp_cov = empirical_covariance(X)
92
+
93
+ _, _, n_iter = graphical_lasso(
94
+ emp_cov, 0.2, mode=mode, max_iter=2, return_n_iter=True
95
+ )
96
+ assert n_iter == 2
97
+
98
+
99
+ def test_graphical_lasso_iris():
100
+ # Hard-coded solution from R glasso package for alpha=1.0
101
+ # (need to set penalize.diagonal to FALSE)
102
+ cov_R = np.array(
103
+ [
104
+ [0.68112222, 0.0000000, 0.265820, 0.02464314],
105
+ [0.00000000, 0.1887129, 0.000000, 0.00000000],
106
+ [0.26582000, 0.0000000, 3.095503, 0.28697200],
107
+ [0.02464314, 0.0000000, 0.286972, 0.57713289],
108
+ ]
109
+ )
110
+ icov_R = np.array(
111
+ [
112
+ [1.5190747, 0.000000, -0.1304475, 0.0000000],
113
+ [0.0000000, 5.299055, 0.0000000, 0.0000000],
114
+ [-0.1304475, 0.000000, 0.3498624, -0.1683946],
115
+ [0.0000000, 0.000000, -0.1683946, 1.8164353],
116
+ ]
117
+ )
118
+ X = datasets.load_iris().data
119
+ emp_cov = empirical_covariance(X)
120
+ for method in ("cd", "lars"):
121
+ cov, icov = graphical_lasso(emp_cov, alpha=1.0, return_costs=False, mode=method)
122
+ assert_array_almost_equal(cov, cov_R)
123
+ assert_array_almost_equal(icov, icov_R)
124
+
125
+
126
+ def test_graph_lasso_2D():
127
+ # Hard-coded solution from Python skggm package
128
+ # obtained by calling `quic(emp_cov, lam=.1, tol=1e-8)`
129
+ cov_skggm = np.array([[3.09550269, 1.186972], [1.186972, 0.57713289]])
130
+
131
+ icov_skggm = np.array([[1.52836773, -3.14334831], [-3.14334831, 8.19753385]])
132
+ X = datasets.load_iris().data[:, 2:]
133
+ emp_cov = empirical_covariance(X)
134
+ for method in ("cd", "lars"):
135
+ cov, icov = graphical_lasso(emp_cov, alpha=0.1, return_costs=False, mode=method)
136
+ assert_array_almost_equal(cov, cov_skggm)
137
+ assert_array_almost_equal(icov, icov_skggm)
138
+
139
+
140
+ def test_graphical_lasso_iris_singular():
141
+ # Small subset of rows to test the rank-deficient case
142
+ # Need to choose samples such that none of the variances are zero
143
+ indices = np.arange(10, 13)
144
+
145
+ # Hard-coded solution from R glasso package for alpha=0.01
146
+ cov_R = np.array(
147
+ [
148
+ [0.08, 0.056666662595, 0.00229729713223, 0.00153153142149],
149
+ [0.056666662595, 0.082222222222, 0.00333333333333, 0.00222222222222],
150
+ [0.002297297132, 0.003333333333, 0.00666666666667, 0.00009009009009],
151
+ [0.001531531421, 0.002222222222, 0.00009009009009, 0.00222222222222],
152
+ ]
153
+ )
154
+ icov_R = np.array(
155
+ [
156
+ [24.42244057, -16.831679593, 0.0, 0.0],
157
+ [-16.83168201, 24.351841681, -6.206896552, -12.5],
158
+ [0.0, -6.206896171, 153.103448276, 0.0],
159
+ [0.0, -12.499999143, 0.0, 462.5],
160
+ ]
161
+ )
162
+ X = datasets.load_iris().data[indices, :]
163
+ emp_cov = empirical_covariance(X)
164
+ for method in ("cd", "lars"):
165
+ cov, icov = graphical_lasso(
166
+ emp_cov, alpha=0.01, return_costs=False, mode=method
167
+ )
168
+ assert_array_almost_equal(cov, cov_R, decimal=5)
169
+ assert_array_almost_equal(icov, icov_R, decimal=5)
170
+
171
+
172
+ def test_graphical_lasso_cv(random_state=1):
173
+ # Sample data from a sparse multivariate normal
174
+ dim = 5
175
+ n_samples = 6
176
+ random_state = check_random_state(random_state)
177
+ prec = make_sparse_spd_matrix(dim, alpha=0.96, random_state=random_state)
178
+ cov = linalg.inv(prec)
179
+ X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
180
+ # Capture stdout, to smoke test the verbose mode
181
+ orig_stdout = sys.stdout
182
+ try:
183
+ sys.stdout = StringIO()
184
+ # We need verbose very high so that Parallel prints on stdout
185
+ GraphicalLassoCV(verbose=100, alphas=5, tol=1e-1).fit(X)
186
+ finally:
187
+ sys.stdout = orig_stdout
188
+
189
+
190
+ @pytest.mark.parametrize("alphas_container_type", ["list", "tuple", "array"])
191
+ def test_graphical_lasso_cv_alphas_iterable(alphas_container_type):
192
+ """Check that we can pass an array-like to `alphas`.
193
+
194
+ Non-regression test for:
195
+ https://github.com/scikit-learn/scikit-learn/issues/22489
196
+ """
197
+ true_cov = np.array(
198
+ [
199
+ [0.8, 0.0, 0.2, 0.0],
200
+ [0.0, 0.4, 0.0, 0.0],
201
+ [0.2, 0.0, 0.3, 0.1],
202
+ [0.0, 0.0, 0.1, 0.7],
203
+ ]
204
+ )
205
+ rng = np.random.RandomState(0)
206
+ X = rng.multivariate_normal(mean=[0, 0, 0, 0], cov=true_cov, size=200)
207
+ alphas = _convert_container([0.02, 0.03], alphas_container_type)
208
+ GraphicalLassoCV(alphas=alphas, tol=1e-1, n_jobs=1).fit(X)
209
+
210
+
211
+ @pytest.mark.parametrize(
212
+ "alphas,err_type,err_msg",
213
+ [
214
+ ([-0.02, 0.03], ValueError, "must be > 0"),
215
+ ([0, 0.03], ValueError, "must be > 0"),
216
+ (["not_number", 0.03], TypeError, "must be an instance of float"),
217
+ ],
218
+ )
219
+ def test_graphical_lasso_cv_alphas_invalid_array(alphas, err_type, err_msg):
220
+ """Check that if an array-like containing a value
221
+ outside of (0, inf] is passed to `alphas`, a ValueError is raised.
222
+ Check if a string is passed, a TypeError is raised.
223
+ """
224
+ true_cov = np.array(
225
+ [
226
+ [0.8, 0.0, 0.2, 0.0],
227
+ [0.0, 0.4, 0.0, 0.0],
228
+ [0.2, 0.0, 0.3, 0.1],
229
+ [0.0, 0.0, 0.1, 0.7],
230
+ ]
231
+ )
232
+ rng = np.random.RandomState(0)
233
+ X = rng.multivariate_normal(mean=[0, 0, 0, 0], cov=true_cov, size=200)
234
+
235
+ with pytest.raises(err_type, match=err_msg):
236
+ GraphicalLassoCV(alphas=alphas, tol=1e-1, n_jobs=1).fit(X)
237
+
238
+
239
+ def test_graphical_lasso_cv_scores():
240
+ splits = 4
241
+ n_alphas = 5
242
+ n_refinements = 3
243
+ true_cov = np.array(
244
+ [
245
+ [0.8, 0.0, 0.2, 0.0],
246
+ [0.0, 0.4, 0.0, 0.0],
247
+ [0.2, 0.0, 0.3, 0.1],
248
+ [0.0, 0.0, 0.1, 0.7],
249
+ ]
250
+ )
251
+ rng = np.random.RandomState(0)
252
+ X = rng.multivariate_normal(mean=[0, 0, 0, 0], cov=true_cov, size=200)
253
+ cov = GraphicalLassoCV(cv=splits, alphas=n_alphas, n_refinements=n_refinements).fit(
254
+ X
255
+ )
256
+
257
+ cv_results = cov.cv_results_
258
+ # alpha and one for each split
259
+
260
+ total_alphas = n_refinements * n_alphas + 1
261
+ keys = ["alphas"]
262
+ split_keys = [f"split{i}_test_score" for i in range(splits)]
263
+ for key in keys + split_keys:
264
+ assert key in cv_results
265
+ assert len(cv_results[key]) == total_alphas
266
+
267
+ cv_scores = np.asarray([cov.cv_results_[key] for key in split_keys])
268
+ expected_mean = cv_scores.mean(axis=0)
269
+ expected_std = cv_scores.std(axis=0)
270
+
271
+ assert_allclose(cov.cv_results_["mean_test_score"], expected_mean)
272
+ assert_allclose(cov.cv_results_["std_test_score"], expected_std)
273
+
274
+
275
+ # TODO(1.5): remove in 1.5
276
+ def test_graphical_lasso_cov_init_deprecation():
277
+ """Check that we raise a deprecation warning if providing `cov_init` in
278
+ `graphical_lasso`."""
279
+ rng, dim, n_samples = np.random.RandomState(0), 20, 100
280
+ prec = make_sparse_spd_matrix(dim, alpha=0.95, random_state=0)
281
+ cov = linalg.inv(prec)
282
+ X = rng.multivariate_normal(np.zeros(dim), cov, size=n_samples)
283
+
284
+ emp_cov = empirical_covariance(X)
285
+ with pytest.warns(FutureWarning, match="cov_init parameter is deprecated"):
286
+ graphical_lasso(emp_cov, alpha=0.1, cov_init=emp_cov)
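For reference, a minimal sketch of the sparse inverse-covariance estimation exercised by the tests above; the generated data and the alpha value are illustrative only.

# Illustrative sparse inverse-covariance estimation with GraphicalLasso,
# following the same pattern as test_graphical_lassos above; data is synthetic.
import numpy as np
from scipy import linalg
from sklearn.covariance import GraphicalLasso
from sklearn.datasets import make_sparse_spd_matrix

rng = np.random.RandomState(1)
prec = make_sparse_spd_matrix(10, alpha=0.9, random_state=rng)  # sparse precision
cov = linalg.inv(prec)
X = rng.multivariate_normal(np.zeros(10), cov, size=200)

model = GraphicalLasso(alpha=0.05).fit(X)
print(np.count_nonzero(model.precision_))  # exact zeros reflect the sparse structure
print(model.covariance_.shape)             # (10, 10)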
llmeval-env/lib/python3.10/site-packages/sklearn/covariance/tests/test_robust_covariance.py ADDED
@@ -0,0 +1,171 @@
1
+ # Author: Alexandre Gramfort <[email protected]>
2
+ # Gael Varoquaux <[email protected]>
3
+ # Virgile Fritsch <[email protected]>
4
+ #
5
+ # License: BSD 3 clause
6
+
7
+ import itertools
8
+
9
+ import numpy as np
10
+ import pytest
11
+
12
+ from sklearn import datasets
13
+ from sklearn.covariance import MinCovDet, empirical_covariance, fast_mcd
14
+ from sklearn.utils._testing import assert_array_almost_equal
15
+
16
+ X = datasets.load_iris().data
17
+ X_1d = X[:, 0]
18
+ n_samples, n_features = X.shape
19
+
20
+
21
+ def test_mcd(global_random_seed):
22
+ # Tests the FastMCD algorithm implementation
23
+ # Small data set
24
+ # test without outliers (random independent normal data)
25
+ launch_mcd_on_dataset(100, 5, 0, 0.02, 0.1, 75, global_random_seed)
26
+ # test with a contaminated data set (medium contamination)
27
+ launch_mcd_on_dataset(100, 5, 20, 0.3, 0.3, 65, global_random_seed)
28
+ # test with a contaminated data set (strong contamination)
29
+ launch_mcd_on_dataset(100, 5, 40, 0.1, 0.1, 50, global_random_seed)
30
+
31
+ # Medium data set
32
+ launch_mcd_on_dataset(1000, 5, 450, 0.1, 0.1, 540, global_random_seed)
33
+
34
+ # Large data set
35
+ launch_mcd_on_dataset(1700, 5, 800, 0.1, 0.1, 870, global_random_seed)
36
+
37
+ # 1D data set
38
+ launch_mcd_on_dataset(500, 1, 100, 0.02, 0.02, 350, global_random_seed)
39
+
40
+
41
+ def test_fast_mcd_on_invalid_input():
42
+ X = np.arange(100)
43
+ msg = "Expected 2D array, got 1D array instead"
44
+ with pytest.raises(ValueError, match=msg):
45
+ fast_mcd(X)
46
+
47
+
48
+ def test_mcd_class_on_invalid_input():
49
+ X = np.arange(100)
50
+ mcd = MinCovDet()
51
+ msg = "Expected 2D array, got 1D array instead"
52
+ with pytest.raises(ValueError, match=msg):
53
+ mcd.fit(X)
54
+
55
+
56
+ def launch_mcd_on_dataset(
57
+ n_samples, n_features, n_outliers, tol_loc, tol_cov, tol_support, seed
58
+ ):
59
+ rand_gen = np.random.RandomState(seed)
60
+ data = rand_gen.randn(n_samples, n_features)
61
+ # add some outliers
62
+ outliers_index = rand_gen.permutation(n_samples)[:n_outliers]
63
+ outliers_offset = 10.0 * (rand_gen.randint(2, size=(n_outliers, n_features)) - 0.5)
64
+ data[outliers_index] += outliers_offset
65
+ inliers_mask = np.ones(n_samples).astype(bool)
66
+ inliers_mask[outliers_index] = False
67
+
68
+ pure_data = data[inliers_mask]
69
+ # compute MCD by fitting an object
70
+ mcd_fit = MinCovDet(random_state=seed).fit(data)
71
+ T = mcd_fit.location_
72
+ S = mcd_fit.covariance_
73
+ H = mcd_fit.support_
74
+ # compare with the estimates learnt from the inliers
75
+ error_location = np.mean((pure_data.mean(0) - T) ** 2)
76
+ assert error_location < tol_loc
77
+ error_cov = np.mean((empirical_covariance(pure_data) - S) ** 2)
78
+ assert error_cov < tol_cov
79
+ assert np.sum(H) >= tol_support
80
+ assert_array_almost_equal(mcd_fit.mahalanobis(data), mcd_fit.dist_)
81
+
82
+
83
+ def test_mcd_issue1127():
84
+ # Check that the code does not break with X.shape = (3, 1)
85
+ # (i.e. n_support = n_samples)
86
+ rnd = np.random.RandomState(0)
87
+ X = rnd.normal(size=(3, 1))
88
+ mcd = MinCovDet()
89
+ mcd.fit(X)
90
+
91
+
92
+ def test_mcd_issue3367(global_random_seed):
93
+ # Check that MCD completes when the covariance matrix is singular
94
+ # i.e. one of the rows and columns are all zeros
95
+ rand_gen = np.random.RandomState(global_random_seed)
96
+
97
+ # Think of these as the values for X and Y -> 10 values between -5 and 5
98
+ data_values = np.linspace(-5, 5, 10).tolist()
99
+ # Get the cartesian product of all possible coordinate pairs from above set
100
+ data = np.array(list(itertools.product(data_values, data_values)))
101
+
102
+ # Add a third column that's all zeros to make our data a set of point
103
+ # within a plane, which means that the covariance matrix will be singular
104
+ data = np.hstack((data, np.zeros((data.shape[0], 1))))
105
+
106
+ # The below line of code should raise an exception if the covariance matrix
107
+ # is singular. As a further test, since we have points in XYZ, the
108
+ # principle components (Eigenvectors) of these directly relate to the
109
+ # geometry of the points. Since it's a plane, we should be able to test
110
+ # that the Eigenvector that corresponds to the smallest Eigenvalue is the
111
+ # plane normal, specifically [0, 0, 1], since everything is in the XY plane
112
+ # (as I've set it up above). To do this one would start by:
113
+ #
114
+ # evals, evecs = np.linalg.eigh(mcd_fit.covariance_)
115
+ # normal = evecs[:, np.argmin(evals)]
116
+ #
117
+ # After which we need to assert that our `normal` is equal to [0, 0, 1].
118
+ # Do note that there is floating point error associated with this, so it's
119
+ # best to subtract the two and then compare some small tolerance (e.g.
120
+ # 1e-12).
121
+ MinCovDet(random_state=rand_gen).fit(data)
122
+
123
+
124
+ def test_mcd_support_covariance_is_zero():
125
+ # Check that MCD returns a ValueError with informative message when the
126
+ # covariance of the support data is equal to 0.
127
+ X_1 = np.array([0.5, 0.1, 0.1, 0.1, 0.957, 0.1, 0.1, 0.1, 0.4285, 0.1])
128
+ X_1 = X_1.reshape(-1, 1)
129
+ X_2 = np.array([0.5, 0.3, 0.3, 0.3, 0.957, 0.3, 0.3, 0.3, 0.4285, 0.3])
130
+ X_2 = X_2.reshape(-1, 1)
131
+ msg = (
132
+ "The covariance matrix of the support data is equal to 0, try to "
133
+ "increase support_fraction"
134
+ )
135
+ for X in [X_1, X_2]:
136
+ with pytest.raises(ValueError, match=msg):
137
+ MinCovDet().fit(X)
138
+
139
+
140
+ def test_mcd_increasing_det_warning(global_random_seed):
141
+ # Check that a warning is raised if we observe increasing determinants
142
+ # during the c_step. In theory the sequence of determinants should be
143
+ # decreasing. Increasing determinants are likely due to ill-conditioned
144
+ # covariance matrices that result in poor precision matrices.
145
+
146
+ X = [
147
+ [5.1, 3.5, 1.4, 0.2],
148
+ [4.9, 3.0, 1.4, 0.2],
149
+ [4.7, 3.2, 1.3, 0.2],
150
+ [4.6, 3.1, 1.5, 0.2],
151
+ [5.0, 3.6, 1.4, 0.2],
152
+ [4.6, 3.4, 1.4, 0.3],
153
+ [5.0, 3.4, 1.5, 0.2],
154
+ [4.4, 2.9, 1.4, 0.2],
155
+ [4.9, 3.1, 1.5, 0.1],
156
+ [5.4, 3.7, 1.5, 0.2],
157
+ [4.8, 3.4, 1.6, 0.2],
158
+ [4.8, 3.0, 1.4, 0.1],
159
+ [4.3, 3.0, 1.1, 0.1],
160
+ [5.1, 3.5, 1.4, 0.3],
161
+ [5.7, 3.8, 1.7, 0.3],
162
+ [5.4, 3.4, 1.7, 0.2],
163
+ [4.6, 3.6, 1.0, 0.2],
164
+ [5.0, 3.0, 1.6, 0.2],
165
+ [5.2, 3.5, 1.5, 0.2],
166
+ ]
167
+
168
+ mcd = MinCovDet(support_fraction=0.5, random_state=global_random_seed)
169
+ warn_msg = "Determinant has increased"
170
+ with pytest.warns(RuntimeWarning, match=warn_msg):
171
+ mcd.fit(X)
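
The tests above exercise MinCovDet end to end. As a hedged illustration (not part of the uploaded test file; the toy data and parameter values are assumptions chosen only for this sketch), the attributes the tests compare are obtained like this:

import numpy as np
from sklearn.covariance import MinCovDet, empirical_covariance

rng = np.random.RandomState(0)
X = rng.normal(size=(100, 3))  # arbitrary toy data, purely illustrative

mcd = MinCovDet(support_fraction=0.75, random_state=0).fit(X)
print(mcd.location_)       # robust location estimate (T in the test above)
print(mcd.covariance_)     # robust covariance estimate (S)
print(mcd.support_.sum())  # number of samples kept in the support (H)
# dist_ stores the Mahalanobis distances of the training points, which is
# exactly what the assert_array_almost_equal check above compares
assert np.allclose(mcd.mahalanobis(X), mcd.dist_)
print(empirical_covariance(X))  # non-robust estimate, used as a reference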
llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/__init__.py ADDED
@@ -0,0 +1,44 @@
1
+ """
2
+ The :mod:`sklearn.ensemble` module includes ensemble-based methods for
3
+ classification, regression and anomaly detection.
4
+ """
5
+ from ._bagging import BaggingClassifier, BaggingRegressor
6
+ from ._base import BaseEnsemble
7
+ from ._forest import (
8
+ ExtraTreesClassifier,
9
+ ExtraTreesRegressor,
10
+ RandomForestClassifier,
11
+ RandomForestRegressor,
12
+ RandomTreesEmbedding,
13
+ )
14
+ from ._gb import GradientBoostingClassifier, GradientBoostingRegressor
15
+ from ._hist_gradient_boosting.gradient_boosting import (
16
+ HistGradientBoostingClassifier,
17
+ HistGradientBoostingRegressor,
18
+ )
19
+ from ._iforest import IsolationForest
20
+ from ._stacking import StackingClassifier, StackingRegressor
21
+ from ._voting import VotingClassifier, VotingRegressor
22
+ from ._weight_boosting import AdaBoostClassifier, AdaBoostRegressor
23
+
24
+ __all__ = [
25
+ "BaseEnsemble",
26
+ "RandomForestClassifier",
27
+ "RandomForestRegressor",
28
+ "RandomTreesEmbedding",
29
+ "ExtraTreesClassifier",
30
+ "ExtraTreesRegressor",
31
+ "BaggingClassifier",
32
+ "BaggingRegressor",
33
+ "IsolationForest",
34
+ "GradientBoostingClassifier",
35
+ "GradientBoostingRegressor",
36
+ "AdaBoostClassifier",
37
+ "AdaBoostRegressor",
38
+ "VotingClassifier",
39
+ "VotingRegressor",
40
+ "StackingClassifier",
41
+ "StackingRegressor",
42
+ "HistGradientBoostingClassifier",
43
+ "HistGradientBoostingRegressor",
44
+ ]
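
A quick, hedged sanity check (not part of the uploaded package; it only assumes that this environment's scikit-learn is importable) showing that the names listed in `__all__` above are reachable from the `sklearn.ensemble` namespace:

from sklearn import ensemble

for name in ("RandomForestClassifier", "GradientBoostingRegressor",
             "HistGradientBoostingClassifier", "StackingRegressor"):
    assert name in ensemble.__all__  # exported by the __init__ above
    assert hasattr(ensemble, name)   # and importable from the package
print(sorted(ensemble.__all__))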
llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/__pycache__/_base.cpython-310.pyc ADDED
Binary file (10.2 kB).
 
llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/__pycache__/_forest.cpython-310.pyc ADDED
Binary file (98.8 kB).
 
llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/__pycache__/_gb.cpython-310.pyc ADDED
Binary file (67.5 kB).
 
llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/__pycache__/_iforest.cpython-310.pyc ADDED
Binary file (17.3 kB).
 
llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/__pycache__/_stacking.cpython-310.pyc ADDED
Binary file (35.1 kB).
 
llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/__pycache__/_voting.cpython-310.pyc ADDED
Binary file (23 kB).
 
llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/_bagging.py ADDED
@@ -0,0 +1,1242 @@
1
+ """Bagging meta-estimator."""
2
+
3
+ # Author: Gilles Louppe <[email protected]>
4
+ # License: BSD 3 clause
5
+
6
+
7
+ import itertools
8
+ import numbers
9
+ from abc import ABCMeta, abstractmethod
10
+ from functools import partial
11
+ from numbers import Integral
12
+ from warnings import warn
13
+
14
+ import numpy as np
15
+
16
+ from ..base import ClassifierMixin, RegressorMixin, _fit_context
17
+ from ..metrics import accuracy_score, r2_score
18
+ from ..tree import DecisionTreeClassifier, DecisionTreeRegressor
19
+ from ..utils import check_random_state, column_or_1d, indices_to_mask
20
+ from ..utils._param_validation import HasMethods, Interval, RealNotInt
21
+ from ..utils._tags import _safe_tags
22
+ from ..utils.metadata_routing import (
23
+ _raise_for_unsupported_routing,
24
+ _RoutingNotSupportedMixin,
25
+ )
26
+ from ..utils.metaestimators import available_if
27
+ from ..utils.multiclass import check_classification_targets
28
+ from ..utils.parallel import Parallel, delayed
29
+ from ..utils.random import sample_without_replacement
30
+ from ..utils.validation import _check_sample_weight, check_is_fitted, has_fit_parameter
31
+ from ._base import BaseEnsemble, _partition_estimators
32
+
33
+ __all__ = ["BaggingClassifier", "BaggingRegressor"]
34
+
35
+ MAX_INT = np.iinfo(np.int32).max
36
+
37
+
38
+ def _generate_indices(random_state, bootstrap, n_population, n_samples):
39
+ """Draw randomly sampled indices."""
40
+ # Draw sample indices
41
+ if bootstrap:
42
+ indices = random_state.randint(0, n_population, n_samples)
43
+ else:
44
+ indices = sample_without_replacement(
45
+ n_population, n_samples, random_state=random_state
46
+ )
47
+
48
+ return indices
49
+
50
+
51
+ def _generate_bagging_indices(
52
+ random_state,
53
+ bootstrap_features,
54
+ bootstrap_samples,
55
+ n_features,
56
+ n_samples,
57
+ max_features,
58
+ max_samples,
59
+ ):
60
+ """Randomly draw feature and sample indices."""
61
+ # Get valid random state
62
+ random_state = check_random_state(random_state)
63
+
64
+ # Draw indices
65
+ feature_indices = _generate_indices(
66
+ random_state, bootstrap_features, n_features, max_features
67
+ )
68
+ sample_indices = _generate_indices(
69
+ random_state, bootstrap_samples, n_samples, max_samples
70
+ )
71
+
72
+ return feature_indices, sample_indices
73
+
74
+
75
+ def _parallel_build_estimators(
76
+ n_estimators,
77
+ ensemble,
78
+ X,
79
+ y,
80
+ sample_weight,
81
+ seeds,
82
+ total_n_estimators,
83
+ verbose,
84
+ check_input,
85
+ ):
86
+ """Private function used to build a batch of estimators within a job."""
87
+ # Retrieve settings
88
+ n_samples, n_features = X.shape
89
+ max_features = ensemble._max_features
90
+ max_samples = ensemble._max_samples
91
+ bootstrap = ensemble.bootstrap
92
+ bootstrap_features = ensemble.bootstrap_features
93
+ support_sample_weight = has_fit_parameter(ensemble.estimator_, "sample_weight")
94
+ has_check_input = has_fit_parameter(ensemble.estimator_, "check_input")
95
+ requires_feature_indexing = bootstrap_features or max_features != n_features
96
+
97
+ if not support_sample_weight and sample_weight is not None:
98
+ raise ValueError("The base estimator doesn't support sample weight")
99
+
100
+ # Build estimators
101
+ estimators = []
102
+ estimators_features = []
103
+
104
+ for i in range(n_estimators):
105
+ if verbose > 1:
106
+ print(
107
+ "Building estimator %d of %d for this parallel run (total %d)..."
108
+ % (i + 1, n_estimators, total_n_estimators)
109
+ )
110
+
111
+ random_state = seeds[i]
112
+ estimator = ensemble._make_estimator(append=False, random_state=random_state)
113
+
114
+ if has_check_input:
115
+ estimator_fit = partial(estimator.fit, check_input=check_input)
116
+ else:
117
+ estimator_fit = estimator.fit
118
+
119
+ # Draw random feature, sample indices
120
+ features, indices = _generate_bagging_indices(
121
+ random_state,
122
+ bootstrap_features,
123
+ bootstrap,
124
+ n_features,
125
+ n_samples,
126
+ max_features,
127
+ max_samples,
128
+ )
129
+
130
+ # Draw samples, using sample weights, and then fit
131
+ if support_sample_weight:
132
+ if sample_weight is None:
133
+ curr_sample_weight = np.ones((n_samples,))
134
+ else:
135
+ curr_sample_weight = sample_weight.copy()
136
+
137
+ if bootstrap:
138
+ sample_counts = np.bincount(indices, minlength=n_samples)
139
+ curr_sample_weight *= sample_counts
140
+ else:
141
+ not_indices_mask = ~indices_to_mask(indices, n_samples)
142
+ curr_sample_weight[not_indices_mask] = 0
143
+
144
+ X_ = X[:, features] if requires_feature_indexing else X
145
+ estimator_fit(X_, y, sample_weight=curr_sample_weight)
146
+ else:
147
+ X_ = X[indices][:, features] if requires_feature_indexing else X[indices]
148
+ estimator_fit(X_, y[indices])
149
+
150
+ estimators.append(estimator)
151
+ estimators_features.append(features)
152
+
153
+ return estimators, estimators_features
154
+
155
+
156
+ def _parallel_predict_proba(estimators, estimators_features, X, n_classes):
157
+ """Private function used to compute (proba-)predictions within a job."""
158
+ n_samples = X.shape[0]
159
+ proba = np.zeros((n_samples, n_classes))
160
+
161
+ for estimator, features in zip(estimators, estimators_features):
162
+ if hasattr(estimator, "predict_proba"):
163
+ proba_estimator = estimator.predict_proba(X[:, features])
164
+
165
+ if n_classes == len(estimator.classes_):
166
+ proba += proba_estimator
167
+
168
+ else:
169
+ proba[:, estimator.classes_] += proba_estimator[
170
+ :, range(len(estimator.classes_))
171
+ ]
172
+
173
+ else:
174
+ # Resort to voting
175
+ predictions = estimator.predict(X[:, features])
176
+
177
+ for i in range(n_samples):
178
+ proba[i, predictions[i]] += 1
179
+
180
+ return proba
181
+
182
+
183
+ def _parallel_predict_log_proba(estimators, estimators_features, X, n_classes):
184
+ """Private function used to compute log probabilities within a job."""
185
+ n_samples = X.shape[0]
186
+ log_proba = np.empty((n_samples, n_classes))
187
+ log_proba.fill(-np.inf)
188
+ all_classes = np.arange(n_classes, dtype=int)
189
+
190
+ for estimator, features in zip(estimators, estimators_features):
191
+ log_proba_estimator = estimator.predict_log_proba(X[:, features])
192
+
193
+ if n_classes == len(estimator.classes_):
194
+ log_proba = np.logaddexp(log_proba, log_proba_estimator)
195
+
196
+ else:
197
+ log_proba[:, estimator.classes_] = np.logaddexp(
198
+ log_proba[:, estimator.classes_],
199
+ log_proba_estimator[:, range(len(estimator.classes_))],
200
+ )
201
+
202
+ missing = np.setdiff1d(all_classes, estimator.classes_)
203
+ log_proba[:, missing] = np.logaddexp(log_proba[:, missing], -np.inf)
204
+
205
+ return log_proba
206
+
207
+
208
+ def _parallel_decision_function(estimators, estimators_features, X):
209
+ """Private function used to compute decisions within a job."""
210
+ return sum(
211
+ estimator.decision_function(X[:, features])
212
+ for estimator, features in zip(estimators, estimators_features)
213
+ )
214
+
215
+
216
+ def _parallel_predict_regression(estimators, estimators_features, X):
217
+ """Private function used to compute predictions within a job."""
218
+ return sum(
219
+ estimator.predict(X[:, features])
220
+ for estimator, features in zip(estimators, estimators_features)
221
+ )
222
+
223
+
224
+ def _estimator_has(attr):
225
+ """Check if we can delegate a method to the underlying estimator.
226
+
227
+ First, we check the first fitted estimator if available, otherwise we
228
+ check the estimator attribute.
229
+ """
230
+
231
+ def check(self):
232
+ if hasattr(self, "estimators_"):
233
+ return hasattr(self.estimators_[0], attr)
234
+ else: # self.estimator is not None
235
+ return hasattr(self.estimator, attr)
236
+
237
+ return check
238
+
239
+
240
+ class BaseBagging(BaseEnsemble, metaclass=ABCMeta):
241
+ """Base class for Bagging meta-estimator.
242
+
243
+ Warning: This class should not be used directly. Use derived classes
244
+ instead.
245
+ """
246
+
247
+ _parameter_constraints: dict = {
248
+ "estimator": [HasMethods(["fit", "predict"]), None],
249
+ "n_estimators": [Interval(Integral, 1, None, closed="left")],
250
+ "max_samples": [
251
+ Interval(Integral, 1, None, closed="left"),
252
+ Interval(RealNotInt, 0, 1, closed="right"),
253
+ ],
254
+ "max_features": [
255
+ Interval(Integral, 1, None, closed="left"),
256
+ Interval(RealNotInt, 0, 1, closed="right"),
257
+ ],
258
+ "bootstrap": ["boolean"],
259
+ "bootstrap_features": ["boolean"],
260
+ "oob_score": ["boolean"],
261
+ "warm_start": ["boolean"],
262
+ "n_jobs": [None, Integral],
263
+ "random_state": ["random_state"],
264
+ "verbose": ["verbose"],
265
+ }
266
+
267
+ @abstractmethod
268
+ def __init__(
269
+ self,
270
+ estimator=None,
271
+ n_estimators=10,
272
+ *,
273
+ max_samples=1.0,
274
+ max_features=1.0,
275
+ bootstrap=True,
276
+ bootstrap_features=False,
277
+ oob_score=False,
278
+ warm_start=False,
279
+ n_jobs=None,
280
+ random_state=None,
281
+ verbose=0,
282
+ ):
283
+ super().__init__(
284
+ estimator=estimator,
285
+ n_estimators=n_estimators,
286
+ )
287
+ self.max_samples = max_samples
288
+ self.max_features = max_features
289
+ self.bootstrap = bootstrap
290
+ self.bootstrap_features = bootstrap_features
291
+ self.oob_score = oob_score
292
+ self.warm_start = warm_start
293
+ self.n_jobs = n_jobs
294
+ self.random_state = random_state
295
+ self.verbose = verbose
296
+
297
+ @_fit_context(
298
+ # BaseBagging.estimator is not validated yet
299
+ prefer_skip_nested_validation=False
300
+ )
301
+ def fit(self, X, y, sample_weight=None):
302
+ """Build a Bagging ensemble of estimators from the training set (X, y).
303
+
304
+ Parameters
305
+ ----------
306
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
307
+ The training input samples. Sparse matrices are accepted only if
308
+ they are supported by the base estimator.
309
+
310
+ y : array-like of shape (n_samples,)
311
+ The target values (class labels in classification, real numbers in
312
+ regression).
313
+
314
+ sample_weight : array-like of shape (n_samples,), default=None
315
+ Sample weights. If None, then samples are equally weighted.
316
+ Note that this is supported only if the base estimator supports
317
+ sample weighting.
318
+
319
+ Returns
320
+ -------
321
+ self : object
322
+ Fitted estimator.
323
+ """
324
+ _raise_for_unsupported_routing(self, "fit", sample_weight=sample_weight)
325
+ # Convert data (X is required to be 2d and indexable)
326
+ X, y = self._validate_data(
327
+ X,
328
+ y,
329
+ accept_sparse=["csr", "csc"],
330
+ dtype=None,
331
+ force_all_finite=False,
332
+ multi_output=True,
333
+ )
334
+ return self._fit(X, y, self.max_samples, sample_weight=sample_weight)
335
+
336
+ def _parallel_args(self):
337
+ return {}
338
+
339
+ def _fit(
340
+ self,
341
+ X,
342
+ y,
343
+ max_samples=None,
344
+ max_depth=None,
345
+ sample_weight=None,
346
+ check_input=True,
347
+ ):
348
+ """Build a Bagging ensemble of estimators from the training
349
+ set (X, y).
350
+
351
+ Parameters
352
+ ----------
353
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
354
+ The training input samples. Sparse matrices are accepted only if
355
+ they are supported by the base estimator.
356
+
357
+ y : array-like of shape (n_samples,)
358
+ The target values (class labels in classification, real numbers in
359
+ regression).
360
+
361
+ max_samples : int or float, default=None
362
+ Argument to use instead of self.max_samples.
363
+
364
+ max_depth : int, default=None
365
+ Override value used when constructing base estimator. Only
366
+ supported if the base estimator has a max_depth parameter.
367
+
368
+ sample_weight : array-like of shape (n_samples,), default=None
369
+ Sample weights. If None, then samples are equally weighted.
370
+ Note that this is supported only if the base estimator supports
371
+ sample weighting.
372
+
373
+ check_input : bool, default=True
374
+ Override value used when fitting base estimator. Only supported
375
+ if the base estimator has a check_input parameter for fit function.
376
+
377
+ Returns
378
+ -------
379
+ self : object
380
+ Fitted estimator.
381
+ """
382
+ random_state = check_random_state(self.random_state)
383
+
384
+ if sample_weight is not None:
385
+ sample_weight = _check_sample_weight(sample_weight, X, dtype=None)
386
+
387
+ # Remap output
388
+ n_samples = X.shape[0]
389
+ self._n_samples = n_samples
390
+ y = self._validate_y(y)
391
+
392
+ # Check parameters
393
+ self._validate_estimator()
394
+
395
+ if max_depth is not None:
396
+ self.estimator_.max_depth = max_depth
397
+
398
+ # Validate max_samples
399
+ if max_samples is None:
400
+ max_samples = self.max_samples
401
+ elif not isinstance(max_samples, numbers.Integral):
402
+ max_samples = int(max_samples * X.shape[0])
403
+
404
+ if max_samples > X.shape[0]:
405
+ raise ValueError("max_samples must be <= n_samples")
406
+
407
+ # Store validated integer row sampling value
408
+ self._max_samples = max_samples
409
+
410
+ # Validate max_features
411
+ if isinstance(self.max_features, numbers.Integral):
412
+ max_features = self.max_features
413
+ elif isinstance(self.max_features, float):
414
+ max_features = int(self.max_features * self.n_features_in_)
415
+
416
+ if max_features > self.n_features_in_:
417
+ raise ValueError("max_features must be <= n_features")
418
+
419
+ max_features = max(1, int(max_features))
420
+
421
+ # Store validated integer feature sampling value
422
+ self._max_features = max_features
423
+
424
+ # Other checks
425
+ if not self.bootstrap and self.oob_score:
426
+ raise ValueError("Out of bag estimation only available if bootstrap=True")
427
+
428
+ if self.warm_start and self.oob_score:
429
+ raise ValueError("Out of bag estimate only available if warm_start=False")
430
+
431
+ if hasattr(self, "oob_score_") and self.warm_start:
432
+ del self.oob_score_
433
+
434
+ if not self.warm_start or not hasattr(self, "estimators_"):
435
+ # Free allocated memory, if any
436
+ self.estimators_ = []
437
+ self.estimators_features_ = []
438
+
439
+ n_more_estimators = self.n_estimators - len(self.estimators_)
440
+
441
+ if n_more_estimators < 0:
442
+ raise ValueError(
443
+ "n_estimators=%d must be larger or equal to "
444
+ "len(estimators_)=%d when warm_start==True"
445
+ % (self.n_estimators, len(self.estimators_))
446
+ )
447
+
448
+ elif n_more_estimators == 0:
449
+ warn(
450
+ "Warm-start fitting without increasing n_estimators does not "
451
+ "fit new trees."
452
+ )
453
+ return self
454
+
455
+ # Parallel loop
456
+ n_jobs, n_estimators, starts = _partition_estimators(
457
+ n_more_estimators, self.n_jobs
458
+ )
459
+ total_n_estimators = sum(n_estimators)
460
+
461
+ # Advance random state to state after training
462
+ # the first n_estimators
463
+ if self.warm_start and len(self.estimators_) > 0:
464
+ random_state.randint(MAX_INT, size=len(self.estimators_))
465
+
466
+ seeds = random_state.randint(MAX_INT, size=n_more_estimators)
467
+ self._seeds = seeds
468
+
469
+ all_results = Parallel(
470
+ n_jobs=n_jobs, verbose=self.verbose, **self._parallel_args()
471
+ )(
472
+ delayed(_parallel_build_estimators)(
473
+ n_estimators[i],
474
+ self,
475
+ X,
476
+ y,
477
+ sample_weight,
478
+ seeds[starts[i] : starts[i + 1]],
479
+ total_n_estimators,
480
+ verbose=self.verbose,
481
+ check_input=check_input,
482
+ )
483
+ for i in range(n_jobs)
484
+ )
485
+
486
+ # Reduce
487
+ self.estimators_ += list(
488
+ itertools.chain.from_iterable(t[0] for t in all_results)
489
+ )
490
+ self.estimators_features_ += list(
491
+ itertools.chain.from_iterable(t[1] for t in all_results)
492
+ )
493
+
494
+ if self.oob_score:
495
+ self._set_oob_score(X, y)
496
+
497
+ return self
498
+
499
+ @abstractmethod
500
+ def _set_oob_score(self, X, y):
501
+ """Calculate out of bag predictions and score."""
502
+
503
+ def _validate_y(self, y):
504
+ if len(y.shape) == 1 or y.shape[1] == 1:
505
+ return column_or_1d(y, warn=True)
506
+ return y
507
+
508
+ def _get_estimators_indices(self):
509
+ # Get drawn indices along both sample and feature axes
510
+ for seed in self._seeds:
511
+ # Operations accessing random_state must be performed identically
512
+ # to those in `_parallel_build_estimators()`
513
+ feature_indices, sample_indices = _generate_bagging_indices(
514
+ seed,
515
+ self.bootstrap_features,
516
+ self.bootstrap,
517
+ self.n_features_in_,
518
+ self._n_samples,
519
+ self._max_features,
520
+ self._max_samples,
521
+ )
522
+
523
+ yield feature_indices, sample_indices
524
+
525
+ @property
526
+ def estimators_samples_(self):
527
+ """
528
+ The subset of drawn samples for each base estimator.
529
+
530
+ Returns a dynamically generated list of indices identifying
531
+ the samples used for fitting each member of the ensemble, i.e.,
532
+ the in-bag samples.
533
+
534
+ Note: the list is re-created at each call to the property in order
535
+ to reduce the object memory footprint by not storing the sampling
536
+ data. Thus fetching the property may be slower than expected.
537
+ """
538
+ return [sample_indices for _, sample_indices in self._get_estimators_indices()]
539
+
540
+
541
+ class BaggingClassifier(_RoutingNotSupportedMixin, ClassifierMixin, BaseBagging):
542
+ """A Bagging classifier.
543
+
544
+ A Bagging classifier is an ensemble meta-estimator that fits base
545
+ classifiers each on random subsets of the original dataset and then
546
+ aggregates their individual predictions (either by voting or by averaging)
547
+ to form a final prediction. Such a meta-estimator can typically be used as
548
+ a way to reduce the variance of a black-box estimator (e.g., a decision
549
+ tree), by introducing randomization into its construction procedure and
550
+ then making an ensemble out of it.
551
+
552
+ This algorithm encompasses several works from the literature. When random
553
+ subsets of the dataset are drawn as random subsets of the samples, then
554
+ this algorithm is known as Pasting [1]_. If samples are drawn with
555
+ replacement, then the method is known as Bagging [2]_. When random subsets
556
+ of the dataset are drawn as random subsets of the features, then the method
557
+ is known as Random Subspaces [3]_. Finally, when base estimators are built
558
+ on subsets of both samples and features, then the method is known as
559
+ Random Patches [4]_.
560
+
561
+ Read more in the :ref:`User Guide <bagging>`.
562
+
563
+ .. versionadded:: 0.15
564
+
565
+ Parameters
566
+ ----------
567
+ estimator : object, default=None
568
+ The base estimator to fit on random subsets of the dataset.
569
+ If None, then the base estimator is a
570
+ :class:`~sklearn.tree.DecisionTreeClassifier`.
571
+
572
+ .. versionadded:: 1.2
573
+ `base_estimator` was renamed to `estimator`.
574
+
575
+ n_estimators : int, default=10
576
+ The number of base estimators in the ensemble.
577
+
578
+ max_samples : int or float, default=1.0
579
+ The number of samples to draw from X to train each base estimator (with
580
+ replacement by default, see `bootstrap` for more details).
581
+
582
+ - If int, then draw `max_samples` samples.
583
+ - If float, then draw `max_samples * X.shape[0]` samples.
584
+
585
+ max_features : int or float, default=1.0
586
+ The number of features to draw from X to train each base estimator (
587
+ without replacement by default, see `bootstrap_features` for more
588
+ details).
589
+
590
+ - If int, then draw `max_features` features.
591
+ - If float, then draw `max(1, int(max_features * n_features_in_))` features.
592
+
593
+ bootstrap : bool, default=True
594
+ Whether samples are drawn with replacement. If False, sampling
595
+ without replacement is performed.
596
+
597
+ bootstrap_features : bool, default=False
598
+ Whether features are drawn with replacement.
599
+
600
+ oob_score : bool, default=False
601
+ Whether to use out-of-bag samples to estimate
602
+ the generalization error. Only available if bootstrap=True.
603
+
604
+ warm_start : bool, default=False
605
+ When set to True, reuse the solution of the previous call to fit
606
+ and add more estimators to the ensemble, otherwise, just fit
607
+ a whole new ensemble. See :term:`the Glossary <warm_start>`.
608
+
609
+ .. versionadded:: 0.17
610
+ *warm_start* constructor parameter.
611
+
612
+ n_jobs : int, default=None
613
+ The number of jobs to run in parallel for both :meth:`fit` and
614
+ :meth:`predict`. ``None`` means 1 unless in a
615
+ :obj:`joblib.parallel_backend` context. ``-1`` means using all
616
+ processors. See :term:`Glossary <n_jobs>` for more details.
617
+
618
+ random_state : int, RandomState instance or None, default=None
619
+ Controls the random resampling of the original dataset
620
+ (sample wise and feature wise).
621
+ If the base estimator accepts a `random_state` attribute, a different
622
+ seed is generated for each instance in the ensemble.
623
+ Pass an int for reproducible output across multiple function calls.
624
+ See :term:`Glossary <random_state>`.
625
+
626
+ verbose : int, default=0
627
+ Controls the verbosity when fitting and predicting.
628
+
629
+ Attributes
630
+ ----------
631
+ estimator_ : estimator
632
+ The base estimator from which the ensemble is grown.
633
+
634
+ .. versionadded:: 1.2
635
+ `base_estimator_` was renamed to `estimator_`.
636
+
637
+ n_features_in_ : int
638
+ Number of features seen during :term:`fit`.
639
+
640
+ .. versionadded:: 0.24
641
+
642
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
643
+ Names of features seen during :term:`fit`. Defined only when `X`
644
+ has feature names that are all strings.
645
+
646
+ .. versionadded:: 1.0
647
+
648
+ estimators_ : list of estimators
649
+ The collection of fitted base estimators.
650
+
651
+ estimators_samples_ : list of arrays
652
+ The subset of drawn samples (i.e., the in-bag samples) for each base
653
+ estimator. Each subset is defined by an array of the indices selected.
654
+
655
+ estimators_features_ : list of arrays
656
+ The subset of drawn features for each base estimator.
657
+
658
+ classes_ : ndarray of shape (n_classes,)
659
+ The classes labels.
660
+
661
+ n_classes_ : int or list
662
+ The number of classes.
663
+
664
+ oob_score_ : float
665
+ Score of the training dataset obtained using an out-of-bag estimate.
666
+ This attribute exists only when ``oob_score`` is True.
667
+
668
+ oob_decision_function_ : ndarray of shape (n_samples, n_classes)
669
+ Decision function computed with out-of-bag estimate on the training
670
+ set. If n_estimators is small it might be possible that a data point
671
+ was never left out during the bootstrap. In this case,
672
+ `oob_decision_function_` might contain NaN. This attribute exists
673
+ only when ``oob_score`` is True.
674
+
675
+ See Also
676
+ --------
677
+ BaggingRegressor : A Bagging regressor.
678
+
679
+ References
680
+ ----------
681
+
682
+ .. [1] L. Breiman, "Pasting small votes for classification in large
683
+ databases and on-line", Machine Learning, 36(1), 85-103, 1999.
684
+
685
+ .. [2] L. Breiman, "Bagging predictors", Machine Learning, 24(2), 123-140,
686
+ 1996.
687
+
688
+ .. [3] T. Ho, "The random subspace method for constructing decision
689
+ forests", Pattern Analysis and Machine Intelligence, 20(8), 832-844,
690
+ 1998.
691
+
692
+ .. [4] G. Louppe and P. Geurts, "Ensembles on Random Patches", Machine
693
+ Learning and Knowledge Discovery in Databases, 346-361, 2012.
694
+
695
+ Examples
696
+ --------
697
+ >>> from sklearn.svm import SVC
698
+ >>> from sklearn.ensemble import BaggingClassifier
699
+ >>> from sklearn.datasets import make_classification
700
+ >>> X, y = make_classification(n_samples=100, n_features=4,
701
+ ... n_informative=2, n_redundant=0,
702
+ ... random_state=0, shuffle=False)
703
+ >>> clf = BaggingClassifier(estimator=SVC(),
704
+ ... n_estimators=10, random_state=0).fit(X, y)
705
+ >>> clf.predict([[0, 0, 0, 0]])
706
+ array([1])
707
+ """
708
+
709
+ def __init__(
710
+ self,
711
+ estimator=None,
712
+ n_estimators=10,
713
+ *,
714
+ max_samples=1.0,
715
+ max_features=1.0,
716
+ bootstrap=True,
717
+ bootstrap_features=False,
718
+ oob_score=False,
719
+ warm_start=False,
720
+ n_jobs=None,
721
+ random_state=None,
722
+ verbose=0,
723
+ ):
724
+ super().__init__(
725
+ estimator=estimator,
726
+ n_estimators=n_estimators,
727
+ max_samples=max_samples,
728
+ max_features=max_features,
729
+ bootstrap=bootstrap,
730
+ bootstrap_features=bootstrap_features,
731
+ oob_score=oob_score,
732
+ warm_start=warm_start,
733
+ n_jobs=n_jobs,
734
+ random_state=random_state,
735
+ verbose=verbose,
736
+ )
737
+
738
+ def _validate_estimator(self):
739
+ """Check the estimator and set the estimator_ attribute."""
740
+ super()._validate_estimator(default=DecisionTreeClassifier())
741
+
742
+ def _set_oob_score(self, X, y):
743
+ n_samples = y.shape[0]
744
+ n_classes_ = self.n_classes_
745
+
746
+ predictions = np.zeros((n_samples, n_classes_))
747
+
748
+ for estimator, samples, features in zip(
749
+ self.estimators_, self.estimators_samples_, self.estimators_features_
750
+ ):
751
+ # Create mask for OOB samples
752
+ mask = ~indices_to_mask(samples, n_samples)
753
+
754
+ if hasattr(estimator, "predict_proba"):
755
+ predictions[mask, :] += estimator.predict_proba(
756
+ (X[mask, :])[:, features]
757
+ )
758
+
759
+ else:
760
+ p = estimator.predict((X[mask, :])[:, features])
761
+ j = 0
762
+
763
+ for i in range(n_samples):
764
+ if mask[i]:
765
+ predictions[i, p[j]] += 1
766
+ j += 1
767
+
768
+ if (predictions.sum(axis=1) == 0).any():
769
+ warn(
770
+ "Some inputs do not have OOB scores. "
771
+ "This probably means too few estimators were used "
772
+ "to compute any reliable oob estimates."
773
+ )
774
+
775
+ oob_decision_function = predictions / predictions.sum(axis=1)[:, np.newaxis]
776
+ oob_score = accuracy_score(y, np.argmax(predictions, axis=1))
777
+
778
+ self.oob_decision_function_ = oob_decision_function
779
+ self.oob_score_ = oob_score
780
+
781
+ def _validate_y(self, y):
782
+ y = column_or_1d(y, warn=True)
783
+ check_classification_targets(y)
784
+ self.classes_, y = np.unique(y, return_inverse=True)
785
+ self.n_classes_ = len(self.classes_)
786
+
787
+ return y
788
+
789
+ def predict(self, X):
790
+ """Predict class for X.
791
+
792
+ The predicted class of an input sample is computed as the class with
793
+ the highest mean predicted probability. If base estimators do not
794
+ implement a ``predict_proba`` method, then it resorts to voting.
795
+
796
+ Parameters
797
+ ----------
798
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
799
+ The training input samples. Sparse matrices are accepted only if
800
+ they are supported by the base estimator.
801
+
802
+ Returns
803
+ -------
804
+ y : ndarray of shape (n_samples,)
805
+ The predicted classes.
806
+ """
807
+ predicted_probability = self.predict_proba(X)
808
+ return self.classes_.take(np.argmax(predicted_probability, axis=1), axis=0)
809
+
810
+ def predict_proba(self, X):
811
+ """Predict class probabilities for X.
812
+
813
+ The predicted class probabilities of an input sample is computed as
814
+ the mean predicted class probabilities of the base estimators in the
815
+ ensemble. If base estimators do not implement a ``predict_proba``
816
+ method, then it resorts to voting and the predicted class probabilities
817
+ of an input sample represent the proportion of estimators predicting
818
+ each class.
819
+
820
+ Parameters
821
+ ----------
822
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
823
+ The training input samples. Sparse matrices are accepted only if
824
+ they are supported by the base estimator.
825
+
826
+ Returns
827
+ -------
828
+ p : ndarray of shape (n_samples, n_classes)
829
+ The class probabilities of the input samples. The order of the
830
+ classes corresponds to that in the attribute :term:`classes_`.
831
+ """
832
+ check_is_fitted(self)
833
+ # Check data
834
+ X = self._validate_data(
835
+ X,
836
+ accept_sparse=["csr", "csc"],
837
+ dtype=None,
838
+ force_all_finite=False,
839
+ reset=False,
840
+ )
841
+
842
+ # Parallel loop
843
+ n_jobs, _, starts = _partition_estimators(self.n_estimators, self.n_jobs)
844
+
845
+ all_proba = Parallel(
846
+ n_jobs=n_jobs, verbose=self.verbose, **self._parallel_args()
847
+ )(
848
+ delayed(_parallel_predict_proba)(
849
+ self.estimators_[starts[i] : starts[i + 1]],
850
+ self.estimators_features_[starts[i] : starts[i + 1]],
851
+ X,
852
+ self.n_classes_,
853
+ )
854
+ for i in range(n_jobs)
855
+ )
856
+
857
+ # Reduce
858
+ proba = sum(all_proba) / self.n_estimators
859
+
860
+ return proba
861
+
862
+ def predict_log_proba(self, X):
863
+ """Predict class log-probabilities for X.
864
+
865
+ The predicted class log-probabilities of an input sample are computed as
866
+ the log of the mean predicted class probabilities of the base
867
+ estimators in the ensemble.
868
+
869
+ Parameters
870
+ ----------
871
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
872
+ The training input samples. Sparse matrices are accepted only if
873
+ they are supported by the base estimator.
874
+
875
+ Returns
876
+ -------
877
+ p : ndarray of shape (n_samples, n_classes)
878
+ The class log-probabilities of the input samples. The order of the
879
+ classes corresponds to that in the attribute :term:`classes_`.
880
+ """
881
+ check_is_fitted(self)
882
+ if hasattr(self.estimator_, "predict_log_proba"):
883
+ # Check data
884
+ X = self._validate_data(
885
+ X,
886
+ accept_sparse=["csr", "csc"],
887
+ dtype=None,
888
+ force_all_finite=False,
889
+ reset=False,
890
+ )
891
+
892
+ # Parallel loop
893
+ n_jobs, _, starts = _partition_estimators(self.n_estimators, self.n_jobs)
894
+
895
+ all_log_proba = Parallel(n_jobs=n_jobs, verbose=self.verbose)(
896
+ delayed(_parallel_predict_log_proba)(
897
+ self.estimators_[starts[i] : starts[i + 1]],
898
+ self.estimators_features_[starts[i] : starts[i + 1]],
899
+ X,
900
+ self.n_classes_,
901
+ )
902
+ for i in range(n_jobs)
903
+ )
904
+
905
+ # Reduce
906
+ log_proba = all_log_proba[0]
907
+
908
+ for j in range(1, len(all_log_proba)):
909
+ log_proba = np.logaddexp(log_proba, all_log_proba[j])
910
+
911
+ log_proba -= np.log(self.n_estimators)
912
+
913
+ else:
914
+ log_proba = np.log(self.predict_proba(X))
915
+
916
+ return log_proba
917
+
918
+ @available_if(_estimator_has("decision_function"))
919
+ def decision_function(self, X):
920
+ """Average of the decision functions of the base classifiers.
921
+
922
+ Parameters
923
+ ----------
924
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
925
+ The training input samples. Sparse matrices are accepted only if
926
+ they are supported by the base estimator.
927
+
928
+ Returns
929
+ -------
930
+ score : ndarray of shape (n_samples, k)
931
+ The decision function of the input samples. The columns correspond
932
+ to the classes in sorted order, as they appear in the attribute
933
+ ``classes_``. Regression and binary classification are special
934
+ cases with ``k == 1``, otherwise ``k==n_classes``.
935
+ """
936
+ check_is_fitted(self)
937
+
938
+ # Check data
939
+ X = self._validate_data(
940
+ X,
941
+ accept_sparse=["csr", "csc"],
942
+ dtype=None,
943
+ force_all_finite=False,
944
+ reset=False,
945
+ )
946
+
947
+ # Parallel loop
948
+ n_jobs, _, starts = _partition_estimators(self.n_estimators, self.n_jobs)
949
+
950
+ all_decisions = Parallel(n_jobs=n_jobs, verbose=self.verbose)(
951
+ delayed(_parallel_decision_function)(
952
+ self.estimators_[starts[i] : starts[i + 1]],
953
+ self.estimators_features_[starts[i] : starts[i + 1]],
954
+ X,
955
+ )
956
+ for i in range(n_jobs)
957
+ )
958
+
959
+ # Reduce
960
+ decisions = sum(all_decisions) / self.n_estimators
961
+
962
+ return decisions
963
+
964
+ def _more_tags(self):
965
+ if self.estimator is None:
966
+ estimator = DecisionTreeClassifier()
967
+ else:
968
+ estimator = self.estimator
969
+
970
+ return {"allow_nan": _safe_tags(estimator, "allow_nan")}
971
+
972
+
973
+ class BaggingRegressor(_RoutingNotSupportedMixin, RegressorMixin, BaseBagging):
974
+ """A Bagging regressor.
975
+
976
+ A Bagging regressor is an ensemble meta-estimator that fits base
977
+ regressors each on random subsets of the original dataset and then
978
+ aggregates their individual predictions (by averaging)
979
+ to form a final prediction. Such a meta-estimator can typically be used as
980
+ a way to reduce the variance of a black-box estimator (e.g., a decision
981
+ tree), by introducing randomization into its construction procedure and
982
+ then making an ensemble out of it.
983
+
984
+ This algorithm encompasses several works from the literature. When random
985
+ subsets of the dataset are drawn as random subsets of the samples, then
986
+ this algorithm is known as Pasting [1]_. If samples are drawn with
987
+ replacement, then the method is known as Bagging [2]_. When random subsets
988
+ of the dataset are drawn as random subsets of the features, then the method
989
+ is known as Random Subspaces [3]_. Finally, when base estimators are built
990
+ on subsets of both samples and features, then the method is known as
991
+ Random Patches [4]_.
992
+
993
+ Read more in the :ref:`User Guide <bagging>`.
994
+
995
+ .. versionadded:: 0.15
996
+
997
+ Parameters
998
+ ----------
999
+ estimator : object, default=None
1000
+ The base estimator to fit on random subsets of the dataset.
1001
+ If None, then the base estimator is a
1002
+ :class:`~sklearn.tree.DecisionTreeRegressor`.
1003
+
1004
+ .. versionadded:: 1.2
1005
+ `base_estimator` was renamed to `estimator`.
1006
+
1007
+ n_estimators : int, default=10
1008
+ The number of base estimators in the ensemble.
1009
+
1010
+ max_samples : int or float, default=1.0
1011
+ The number of samples to draw from X to train each base estimator (with
1012
+ replacement by default, see `bootstrap` for more details).
1013
+
1014
+ - If int, then draw `max_samples` samples.
1015
+ - If float, then draw `max_samples * X.shape[0]` samples.
1016
+
1017
+ max_features : int or float, default=1.0
1018
+ The number of features to draw from X to train each base estimator (
1019
+ without replacement by default, see `bootstrap_features` for more
1020
+ details).
1021
+
1022
+ - If int, then draw `max_features` features.
1023
+ - If float, then draw `max(1, int(max_features * n_features_in_))` features.
1024
+
1025
+ bootstrap : bool, default=True
1026
+ Whether samples are drawn with replacement. If False, sampling
1027
+ without replacement is performed.
1028
+
1029
+ bootstrap_features : bool, default=False
1030
+ Whether features are drawn with replacement.
1031
+
1032
+ oob_score : bool, default=False
1033
+ Whether to use out-of-bag samples to estimate
1034
+ the generalization error. Only available if bootstrap=True.
1035
+
1036
+ warm_start : bool, default=False
1037
+ When set to True, reuse the solution of the previous call to fit
1038
+ and add more estimators to the ensemble, otherwise, just fit
1039
+ a whole new ensemble. See :term:`the Glossary <warm_start>`.
1040
+
1041
+ n_jobs : int, default=None
1042
+ The number of jobs to run in parallel for both :meth:`fit` and
1043
+ :meth:`predict`. ``None`` means 1 unless in a
1044
+ :obj:`joblib.parallel_backend` context. ``-1`` means using all
1045
+ processors. See :term:`Glossary <n_jobs>` for more details.
1046
+
1047
+ random_state : int, RandomState instance or None, default=None
1048
+ Controls the random resampling of the original dataset
1049
+ (sample wise and feature wise).
1050
+ If the base estimator accepts a `random_state` attribute, a different
1051
+ seed is generated for each instance in the ensemble.
1052
+ Pass an int for reproducible output across multiple function calls.
1053
+ See :term:`Glossary <random_state>`.
1054
+
1055
+ verbose : int, default=0
1056
+ Controls the verbosity when fitting and predicting.
1057
+
1058
+ Attributes
1059
+ ----------
1060
+ estimator_ : estimator
1061
+ The base estimator from which the ensemble is grown.
1062
+
1063
+ .. versionadded:: 1.2
1064
+ `base_estimator_` was renamed to `estimator_`.
1065
+
1066
+ n_features_in_ : int
1067
+ Number of features seen during :term:`fit`.
1068
+
1069
+ .. versionadded:: 0.24
1070
+
1071
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
1072
+ Names of features seen during :term:`fit`. Defined only when `X`
1073
+ has feature names that are all strings.
1074
+
1075
+ .. versionadded:: 1.0
1076
+
1077
+ estimators_ : list of estimators
1078
+ The collection of fitted sub-estimators.
1079
+
1080
+ estimators_samples_ : list of arrays
1081
+ The subset of drawn samples (i.e., the in-bag samples) for each base
1082
+ estimator. Each subset is defined by an array of the indices selected.
1083
+
1084
+ estimators_features_ : list of arrays
1085
+ The subset of drawn features for each base estimator.
1086
+
1087
+ oob_score_ : float
1088
+ Score of the training dataset obtained using an out-of-bag estimate.
1089
+ This attribute exists only when ``oob_score`` is True.
1090
+
1091
+ oob_prediction_ : ndarray of shape (n_samples,)
1092
+ Prediction computed with out-of-bag estimate on the training
1093
+ set. If n_estimators is small it might be possible that a data point
1094
+ was never left out during the bootstrap. In this case,
1095
+ `oob_prediction_` might contain NaN. This attribute exists only
1096
+ when ``oob_score`` is True.
1097
+
1098
+ See Also
1099
+ --------
1100
+ BaggingClassifier : A Bagging classifier.
1101
+
1102
+ References
1103
+ ----------
1104
+
1105
+ .. [1] L. Breiman, "Pasting small votes for classification in large
1106
+ databases and on-line", Machine Learning, 36(1), 85-103, 1999.
1107
+
1108
+ .. [2] L. Breiman, "Bagging predictors", Machine Learning, 24(2), 123-140,
1109
+ 1996.
1110
+
1111
+ .. [3] T. Ho, "The random subspace method for constructing decision
1112
+ forests", Pattern Analysis and Machine Intelligence, 20(8), 832-844,
1113
+ 1998.
1114
+
1115
+ .. [4] G. Louppe and P. Geurts, "Ensembles on Random Patches", Machine
1116
+ Learning and Knowledge Discovery in Databases, 346-361, 2012.
1117
+
1118
+ Examples
1119
+ --------
1120
+ >>> from sklearn.svm import SVR
1121
+ >>> from sklearn.ensemble import BaggingRegressor
1122
+ >>> from sklearn.datasets import make_regression
1123
+ >>> X, y = make_regression(n_samples=100, n_features=4,
1124
+ ... n_informative=2, n_targets=1,
1125
+ ... random_state=0, shuffle=False)
1126
+ >>> regr = BaggingRegressor(estimator=SVR(),
1127
+ ... n_estimators=10, random_state=0).fit(X, y)
1128
+ >>> regr.predict([[0, 0, 0, 0]])
1129
+ array([-2.8720...])
1130
+ """
1131
+
1132
+ def __init__(
1133
+ self,
1134
+ estimator=None,
1135
+ n_estimators=10,
1136
+ *,
1137
+ max_samples=1.0,
1138
+ max_features=1.0,
1139
+ bootstrap=True,
1140
+ bootstrap_features=False,
1141
+ oob_score=False,
1142
+ warm_start=False,
1143
+ n_jobs=None,
1144
+ random_state=None,
1145
+ verbose=0,
1146
+ ):
1147
+ super().__init__(
1148
+ estimator=estimator,
1149
+ n_estimators=n_estimators,
1150
+ max_samples=max_samples,
1151
+ max_features=max_features,
1152
+ bootstrap=bootstrap,
1153
+ bootstrap_features=bootstrap_features,
1154
+ oob_score=oob_score,
1155
+ warm_start=warm_start,
1156
+ n_jobs=n_jobs,
1157
+ random_state=random_state,
1158
+ verbose=verbose,
1159
+ )
1160
+
1161
+ def predict(self, X):
1162
+ """Predict regression target for X.
1163
+
1164
+ The predicted regression target of an input sample is computed as the
1165
+ mean predicted regression targets of the estimators in the ensemble.
1166
+
1167
+ Parameters
1168
+ ----------
1169
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
1170
+ The training input samples. Sparse matrices are accepted only if
1171
+ they are supported by the base estimator.
1172
+
1173
+ Returns
1174
+ -------
1175
+ y : ndarray of shape (n_samples,)
1176
+ The predicted values.
1177
+ """
1178
+ check_is_fitted(self)
1179
+ # Check data
1180
+ X = self._validate_data(
1181
+ X,
1182
+ accept_sparse=["csr", "csc"],
1183
+ dtype=None,
1184
+ force_all_finite=False,
1185
+ reset=False,
1186
+ )
1187
+
1188
+ # Parallel loop
1189
+ n_jobs, _, starts = _partition_estimators(self.n_estimators, self.n_jobs)
1190
+
1191
+ all_y_hat = Parallel(n_jobs=n_jobs, verbose=self.verbose)(
1192
+ delayed(_parallel_predict_regression)(
1193
+ self.estimators_[starts[i] : starts[i + 1]],
1194
+ self.estimators_features_[starts[i] : starts[i + 1]],
1195
+ X,
1196
+ )
1197
+ for i in range(n_jobs)
1198
+ )
1199
+
1200
+ # Reduce
1201
+ y_hat = sum(all_y_hat) / self.n_estimators
1202
+
1203
+ return y_hat
1204
+
1205
+ def _validate_estimator(self):
1206
+ """Check the estimator and set the estimator_ attribute."""
1207
+ super()._validate_estimator(default=DecisionTreeRegressor())
1208
+
1209
+ def _set_oob_score(self, X, y):
1210
+ n_samples = y.shape[0]
1211
+
1212
+ predictions = np.zeros((n_samples,))
1213
+ n_predictions = np.zeros((n_samples,))
1214
+
1215
+ for estimator, samples, features in zip(
1216
+ self.estimators_, self.estimators_samples_, self.estimators_features_
1217
+ ):
1218
+ # Create mask for OOB samples
1219
+ mask = ~indices_to_mask(samples, n_samples)
1220
+
1221
+ predictions[mask] += estimator.predict((X[mask, :])[:, features])
1222
+ n_predictions[mask] += 1
1223
+
1224
+ if (n_predictions == 0).any():
1225
+ warn(
1226
+ "Some inputs do not have OOB scores. "
1227
+ "This probably means too few estimators were used "
1228
+ "to compute any reliable oob estimates."
1229
+ )
1230
+ n_predictions[n_predictions == 0] = 1
1231
+
1232
+ predictions /= n_predictions
1233
+
1234
+ self.oob_prediction_ = predictions
1235
+ self.oob_score_ = r2_score(y, predictions)
1236
+
1237
+ def _more_tags(self):
1238
+ if self.estimator is None:
1239
+ estimator = DecisionTreeRegressor()
1240
+ else:
1241
+ estimator = self.estimator
1242
+ return {"allow_nan": _safe_tags(estimator, "allow_nan")}
llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/_base.py ADDED
@@ -0,0 +1,301 @@
1
+ """Base class for ensemble-based estimators."""
2
+
3
+ # Authors: Gilles Louppe
4
+ # License: BSD 3 clause
5
+
6
+ from abc import ABCMeta, abstractmethod
7
+ from typing import List
8
+
9
+ import numpy as np
10
+ from joblib import effective_n_jobs
11
+
12
+ from ..base import BaseEstimator, MetaEstimatorMixin, clone, is_classifier, is_regressor
13
+ from ..utils import Bunch, _print_elapsed_time, check_random_state
14
+ from ..utils._tags import _safe_tags
15
+ from ..utils.metaestimators import _BaseComposition
16
+
17
+
18
+ def _fit_single_estimator(
19
+ estimator, X, y, sample_weight=None, message_clsname=None, message=None
20
+ ):
21
+ """Private function used to fit an estimator within a job."""
22
+ if sample_weight is not None:
23
+ try:
24
+ with _print_elapsed_time(message_clsname, message):
25
+ estimator.fit(X, y, sample_weight=sample_weight)
26
+ except TypeError as exc:
27
+ if "unexpected keyword argument 'sample_weight'" in str(exc):
28
+ raise TypeError(
29
+ "Underlying estimator {} does not support sample weights.".format(
30
+ estimator.__class__.__name__
31
+ )
32
+ ) from exc
33
+ raise
34
+ else:
35
+ with _print_elapsed_time(message_clsname, message):
36
+ estimator.fit(X, y)
37
+ return estimator
38
+
39
+
40
+ def _set_random_states(estimator, random_state=None):
41
+ """Set fixed random_state parameters for an estimator.
42
+
43
+ Finds all parameters ending with ``random_state`` and sets them to integers
44
+ derived from ``random_state``.
45
+
46
+ Parameters
47
+ ----------
48
+ estimator : estimator supporting get/set_params
49
+ Estimator with potential randomness managed by random_state
50
+ parameters.
51
+
52
+ random_state : int, RandomState instance or None, default=None
53
+ Pseudo-random number generator to control the generation of the random
54
+ integers. Pass an int for reproducible output across multiple function
55
+ calls.
56
+ See :term:`Glossary <random_state>`.
57
+
58
+ Notes
59
+ -----
60
+ This does not necessarily set *all* ``random_state`` attributes that
61
+ control an estimator's randomness, only those accessible through
62
+ ``estimator.get_params()``. ``random_state``s not controlled include
63
+ those belonging to:
64
+
65
+ * cross-validation splitters
66
+ * ``scipy.stats`` rvs
67
+ """
68
+ random_state = check_random_state(random_state)
69
+ to_set = {}
70
+ for key in sorted(estimator.get_params(deep=True)):
71
+ if key == "random_state" or key.endswith("__random_state"):
72
+ to_set[key] = random_state.randint(np.iinfo(np.int32).max)
73
+
74
+ if to_set:
75
+ estimator.set_params(**to_set)
76
+
77
+
78
+ class BaseEnsemble(MetaEstimatorMixin, BaseEstimator, metaclass=ABCMeta):
79
+ """Base class for all ensemble classes.
80
+
81
+ Warning: This class should not be used directly. Use derived classes
82
+ instead.
83
+
84
+ Parameters
85
+ ----------
86
+ estimator : object
87
+ The base estimator from which the ensemble is built.
88
+
89
+ n_estimators : int, default=10
90
+ The number of estimators in the ensemble.
91
+
92
+ estimator_params : list of str, default=tuple()
93
+ The list of attributes to use as parameters when instantiating a
94
+ new base estimator. If none are given, default parameters are used.
95
+
96
+ Attributes
97
+ ----------
98
+ estimator_ : estimator
99
+ The base estimator from which the ensemble is grown.
100
+
101
+ estimators_ : list of estimators
102
+ The collection of fitted base estimators.
103
+ """
104
+
105
+ # overwrite _required_parameters from MetaEstimatorMixin
106
+ _required_parameters: List[str] = []
107
+
108
+ @abstractmethod
109
+ def __init__(
110
+ self,
111
+ estimator=None,
112
+ *,
113
+ n_estimators=10,
114
+ estimator_params=tuple(),
115
+ ):
116
+ # Set parameters
117
+ self.estimator = estimator
118
+ self.n_estimators = n_estimators
119
+ self.estimator_params = estimator_params
120
+
121
+ # Don't instantiate estimators now! Parameters of estimator might
122
+ # still change. E.g., when grid-searching with the nested object syntax.
123
+ # self.estimators_ needs to be filled by the derived classes in fit.
124
+
125
+ def _validate_estimator(self, default=None):
126
+ """Check the base estimator.
127
+
128
+ Sets the `estimator_` attribute.
129
+ """
130
+ if self.estimator is not None:
131
+ self.estimator_ = self.estimator
132
+ else:
133
+ self.estimator_ = default
134
+
135
+ def _make_estimator(self, append=True, random_state=None):
136
+ """Make and configure a copy of the `estimator_` attribute.
137
+
138
+ Warning: This method should be used to properly instantiate new
139
+ sub-estimators.
140
+ """
141
+ estimator = clone(self.estimator_)
142
+ estimator.set_params(**{p: getattr(self, p) for p in self.estimator_params})
143
+
144
+ if random_state is not None:
145
+ _set_random_states(estimator, random_state)
146
+
147
+ if append:
148
+ self.estimators_.append(estimator)
149
+
150
+ return estimator
151
+
152
+ def __len__(self):
153
+ """Return the number of estimators in the ensemble."""
154
+ return len(self.estimators_)
155
+
156
+ def __getitem__(self, index):
157
+ """Return the index'th estimator in the ensemble."""
158
+ return self.estimators_[index]
159
+
160
+ def __iter__(self):
161
+ """Return iterator over estimators in the ensemble."""
162
+ return iter(self.estimators_)
163
+
164
+
165
+ def _partition_estimators(n_estimators, n_jobs):
166
+ """Private function used to partition estimators between jobs."""
167
+ # Compute the number of jobs
168
+ n_jobs = min(effective_n_jobs(n_jobs), n_estimators)
169
+
170
+ # Partition estimators between jobs
171
+ n_estimators_per_job = np.full(n_jobs, n_estimators // n_jobs, dtype=int)
172
+ n_estimators_per_job[: n_estimators % n_jobs] += 1
173
+ starts = np.cumsum(n_estimators_per_job)
174
+
175
+ return n_jobs, n_estimators_per_job.tolist(), [0] + starts.tolist()
176
+
177
+
178
+ class _BaseHeterogeneousEnsemble(
179
+ MetaEstimatorMixin, _BaseComposition, metaclass=ABCMeta
180
+ ):
181
+ """Base class for heterogeneous ensemble of learners.
182
+
183
+ Parameters
184
+ ----------
185
+ estimators : list of (str, estimator) tuples
186
+ The base estimators to use in the ensemble. Each element of the
187
+ list is defined as a tuple of string (i.e. name of the estimator) and
188
+ an estimator instance. An estimator can be set to `'drop'` using
189
+ `set_params`.
190
+
191
+ Attributes
192
+ ----------
193
+ estimators_ : list of estimators
194
+ The elements of the estimators parameter, having been fitted on the
195
+ training data. If an estimator has been set to `'drop'`, it will not
196
+ appear in `estimators_`.
197
+ """
198
+
199
+ _required_parameters = ["estimators"]
200
+
201
+ @property
202
+ def named_estimators(self):
203
+ """Dictionary to access any fitted sub-estimators by name.
204
+
205
+ Returns
206
+ -------
207
+ :class:`~sklearn.utils.Bunch`
208
+ """
209
+ return Bunch(**dict(self.estimators))
210
+
211
+ @abstractmethod
212
+ def __init__(self, estimators):
213
+ self.estimators = estimators
214
+
215
+ def _validate_estimators(self):
216
+ if len(self.estimators) == 0:
217
+ raise ValueError(
218
+ "Invalid 'estimators' attribute, 'estimators' should be a "
219
+ "non-empty list of (string, estimator) tuples."
220
+ )
221
+ names, estimators = zip(*self.estimators)
222
+ # defined by MetaEstimatorMixin
223
+ self._validate_names(names)
224
+
225
+ has_estimator = any(est != "drop" for est in estimators)
226
+ if not has_estimator:
227
+ raise ValueError(
228
+ "All estimators are dropped. At least one is required "
229
+ "to be an estimator."
230
+ )
231
+
232
+ is_estimator_type = is_classifier if is_classifier(self) else is_regressor
233
+
234
+ for est in estimators:
235
+ if est != "drop" and not is_estimator_type(est):
236
+ raise ValueError(
237
+ "The estimator {} should be a {}.".format(
238
+ est.__class__.__name__, is_estimator_type.__name__[3:]
239
+ )
240
+ )
241
+
242
+ return names, estimators
243
+
244
+ def set_params(self, **params):
245
+ """
246
+ Set the parameters of an estimator from the ensemble.
247
+
248
+ Valid parameter keys can be listed with `get_params()`. Note that you
249
+ can directly set the parameters of the estimators contained in
250
+ `estimators`.
251
+
252
+ Parameters
253
+ ----------
254
+ **params : keyword arguments
255
+ Specific parameters using e.g.
256
+ `set_params(parameter_name=new_value)`. In addition to setting the
257
+ parameters of the ensemble, the individual estimators contained in
258
+ `estimators` can also be set, or removed by setting them to
259
+ 'drop'.
260
+
261
+ Returns
262
+ -------
263
+ self : object
264
+ Estimator instance.
265
+ """
266
+ super()._set_params("estimators", **params)
267
+ return self
268
+
269
+ def get_params(self, deep=True):
270
+ """
271
+ Get the parameters of an estimator from the ensemble.
272
+
273
+ Returns the parameters given in the constructor as well as the
274
+ estimators contained within the `estimators` parameter.
275
+
276
+ Parameters
277
+ ----------
278
+ deep : bool, default=True
279
+ Setting it to True gets the various estimators and the parameters
280
+ of the estimators as well.
281
+
282
+ Returns
283
+ -------
284
+ params : dict
285
+ Parameter and estimator names mapped to their values or parameter
286
+ names mapped to their values.
287
+ """
288
+ return super()._get_params("estimators", deep=deep)
289
+
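For a concrete, hedged illustration of these ``set_params``/``get_params`` semantics, a public subclass such as ``VotingClassifier`` accepts the nested ``name__param`` syntax and the ``'drop'`` sentinel (the estimator names ``lr``/``dt`` below are arbitrary):

    from sklearn.ensemble import VotingClassifier
    from sklearn.linear_model import LogisticRegression
    from sklearn.tree import DecisionTreeClassifier

    clf = VotingClassifier(
        estimators=[("lr", LogisticRegression()), ("dt", DecisionTreeClassifier())]
    )
    clf.set_params(lr__C=0.5)  # reach into a nested estimator
    clf.set_params(dt="drop")  # remove an estimator before fitting
    print("lr" in clf.get_params())  # True: each named estimator is itself a parameter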
290
+ def _more_tags(self):
291
+ try:
292
+ allow_nan = all(
293
+ _safe_tags(est[1])["allow_nan"] if est[1] != "drop" else True
294
+ for est in self.estimators
295
+ )
296
+ except Exception:
297
+ # If `estimators` does not comply with our API (list of tuples) then it will
298
+ # fail. In this case, we assume that `allow_nan` is False but the parameter
299
+ # validation will raise an error during `fit`.
300
+ allow_nan = False
301
+ return {"preserves_dtype": [], "allow_nan": allow_nan}
llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/_forest.py ADDED
The diff for this file is too large to render. See raw diff
 
llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/_gb.py ADDED
@@ -0,0 +1,2168 @@
1
+ """Gradient Boosted Regression Trees.
2
+
3
+ This module contains methods for fitting gradient boosted regression trees for
4
+ both classification and regression.
5
+
6
+ The module structure is the following:
7
+
8
+ - The ``BaseGradientBoosting`` base class implements a common ``fit`` method
9
+ for all the estimators in the module. Regression and classification
10
+ only differ in the concrete ``LossFunction`` used.
11
+
12
+ - ``GradientBoostingClassifier`` implements gradient boosting for
13
+ classification problems.
14
+
15
+ - ``GradientBoostingRegressor`` implements gradient boosting for
16
+ regression problems.
17
+ """
18
+
19
+ # Authors: Peter Prettenhofer, Scott White, Gilles Louppe, Emanuele Olivetti,
20
+ # Arnaud Joly, Jacob Schreiber
21
+ # License: BSD 3 clause
22
+
23
+ import math
24
+ import warnings
25
+ from abc import ABCMeta, abstractmethod
26
+ from numbers import Integral, Real
27
+ from time import time
28
+
29
+ import numpy as np
30
+ from scipy.sparse import csc_matrix, csr_matrix, issparse
31
+
32
+ from .._loss.loss import (
33
+ _LOSSES,
34
+ AbsoluteError,
35
+ ExponentialLoss,
36
+ HalfBinomialLoss,
37
+ HalfMultinomialLoss,
38
+ HalfSquaredError,
39
+ HuberLoss,
40
+ PinballLoss,
41
+ )
42
+ from ..base import ClassifierMixin, RegressorMixin, _fit_context, is_classifier
43
+ from ..dummy import DummyClassifier, DummyRegressor
44
+ from ..exceptions import NotFittedError
45
+ from ..model_selection import train_test_split
46
+ from ..preprocessing import LabelEncoder
47
+ from ..tree import DecisionTreeRegressor
48
+ from ..tree._tree import DOUBLE, DTYPE, TREE_LEAF
49
+ from ..utils import check_array, check_random_state, column_or_1d
50
+ from ..utils._param_validation import HasMethods, Interval, StrOptions
51
+ from ..utils.multiclass import check_classification_targets
52
+ from ..utils.stats import _weighted_percentile
53
+ from ..utils.validation import _check_sample_weight, check_is_fitted
54
+ from ._base import BaseEnsemble
55
+ from ._gradient_boosting import _random_sample_mask, predict_stage, predict_stages
56
+
57
+ _LOSSES = _LOSSES.copy()
58
+ _LOSSES.update(
59
+ {
60
+ "quantile": PinballLoss,
61
+ "huber": HuberLoss,
62
+ }
63
+ )
64
+
65
+
66
+ def _safe_divide(numerator, denominator):
67
+ """Prevents overflow and division by zero."""
68
+ # This is used for classifiers where the denominator might become zero exactly.
69
+ # For instance for log loss, HalfBinomialLoss, if proba=0 or proba=1 exactly, then
70
+ # denominator = hessian = 0, and we should set the node value in the line search to
71
+ # zero as there is no improvement of the loss possible.
72
+ # For numerical safety, we do this already for extremely tiny values.
73
+ if abs(denominator) < 1e-150:
74
+ return 0.0
75
+ else:
76
+ # Cast to Python float to trigger a ZeroDivisionError without relying
77
+ # on `np.errstate` that is not supported by Pyodide.
78
+ result = float(numerator) / float(denominator)
82
+ if math.isinf(result):
83
+ warnings.warn("overflow encountered in _safe_divide", RuntimeWarning)
84
+ return result
85
+
86
+
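A minimal standalone sketch of the guarded division above (the name ``safe_divide`` is hypothetical) shows the two protections: a zero update when the denominator (the hessian) vanishes, and a warning instead of an exception on overflow:

    import math
    import warnings

    def safe_divide(numerator, denominator):
        # A vanishing hessian (e.g. p * (1 - p) with p near 0 or 1) would blow
        # up the Newton step, so return a zero update instead.
        if abs(denominator) < 1e-150:
            return 0.0
        result = float(numerator) / float(denominator)
        if math.isinf(result):
            warnings.warn("overflow encountered in safe_divide", RuntimeWarning)
        return result

    print(safe_divide(0.3, 0.0))       # 0.0, no ZeroDivisionError
    print(safe_divide(1e300, 1e-100))  # inf, with a RuntimeWarning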
87
+ def _init_raw_predictions(X, estimator, loss, use_predict_proba):
88
+ """Return the initial raw predictions.
89
+
90
+ Parameters
91
+ ----------
92
+ X : ndarray of shape (n_samples, n_features)
93
+ The data array.
94
+ estimator : object
95
+ The estimator to use to compute the predictions.
96
+ loss : BaseLoss
97
+ An instance of a loss function class.
98
+ use_predict_proba : bool
99
+ Whether estimator.predict_proba is used instead of estimator.predict.
100
+
101
+ Returns
102
+ -------
103
+ raw_predictions : ndarray of shape (n_samples, K)
104
+ The initial raw predictions. K is equal to 1 for binary
105
+ classification and regression, and equal to the number of classes
106
+ for multiclass classification. ``raw_predictions`` is cast
107
+ into float64.
108
+ """
109
+ # TODO: Use loss.fit_intercept_only where appropriate instead of
110
+ # DummyRegressor which is the default given by the `init` parameter,
111
+ # see also _init_state.
112
+ if use_predict_proba:
113
+ # Our parameter validation, set via _fit_context and _parameter_constraints
114
+ # already guarantees that estimator has a predict_proba method.
115
+ predictions = estimator.predict_proba(X)
116
+ if not loss.is_multiclass:
117
+ predictions = predictions[:, 1] # probability of positive class
118
+ eps = np.finfo(np.float32).eps # FIXME: This is quite large!
119
+ predictions = np.clip(predictions, eps, 1 - eps, dtype=np.float64)
120
+ else:
121
+ predictions = estimator.predict(X).astype(np.float64)
122
+
123
+ if predictions.ndim == 1:
124
+ return loss.link.link(predictions).reshape(-1, 1)
125
+ else:
126
+ return loss.link.link(predictions)
127
+
128
+
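As a hedged sketch of what this helper yields for binary classification with the log loss, the prior probability predicted by ``DummyClassifier`` is clipped and pushed through the logit link (the dataset below is illustrative):

    import numpy as np
    from scipy.special import logit
    from sklearn.dummy import DummyClassifier

    rng = np.random.RandomState(0)
    X = rng.rand(100, 3)
    y = (rng.rand(100) > 0.7).astype(int)  # roughly 30% positives

    init = DummyClassifier(strategy="prior").fit(X, y)
    proba_pos = init.predict_proba(X)[:, 1]        # constant class prior
    eps = np.finfo(np.float32).eps
    raw = logit(np.clip(proba_pos, eps, 1 - eps))  # log-odds of the prior
    print(raw.reshape(-1, 1).shape)                # (100, 1), every row identical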
129
+ def _update_terminal_regions(
130
+ loss,
131
+ tree,
132
+ X,
133
+ y,
134
+ neg_gradient,
135
+ raw_prediction,
136
+ sample_weight,
137
+ sample_mask,
138
+ learning_rate=0.1,
139
+ k=0,
140
+ ):
141
+ """Update the leaf values to be predicted by the tree and raw_prediction.
142
+
143
+ The current raw predictions of the model (of this stage) are updated.
144
+
145
+ Additionally, the terminal regions (=leaves) of the given tree are updated as well.
146
+ This corresponds to the line search step in "Greedy Function Approximation" by
147
+ Friedman, Algorithm 1 step 5.
148
+
149
+ Update equals:
150
+ argmin_{x} loss(y_true, raw_prediction_old + x * tree.value)
151
+
152
+ For non-trivial cases like the Binomial loss, the update has no closed formula and
153
+ is an approximation, again, see the Friedman paper.
154
+
155
+ Also note that the update formula for the SquaredError is the identity. Therefore,
156
+ in this case, the leaf values don't need an update and only the raw_predictions are
157
+ updated (with the learning rate included).
158
+
159
+ Parameters
160
+ ----------
161
+ loss : BaseLoss
162
+ tree : tree.Tree
163
+ The tree object.
164
+ X : ndarray of shape (n_samples, n_features)
165
+ The data array.
166
+ y : ndarray of shape (n_samples,)
167
+ The target labels.
168
+ neg_gradient : ndarray of shape (n_samples,)
169
+ The negative gradient.
170
+ raw_prediction : ndarray of shape (n_samples, n_trees_per_iteration)
171
+ The raw predictions (i.e. values from the tree leaves) of the
172
+ tree ensemble at iteration ``i - 1``.
173
+ sample_weight : ndarray of shape (n_samples,)
174
+ The weight of each sample.
175
+ sample_mask : ndarray of shape (n_samples,)
176
+ The sample mask to be used.
177
+ learning_rate : float, default=0.1
178
+ Learning rate shrinks the contribution of each tree by
179
+ ``learning_rate``.
180
+ k : int, default=0
181
+ The index of the estimator being updated.
182
+ """
183
+ # compute leaf for each sample in ``X``.
184
+ terminal_regions = tree.apply(X)
185
+
186
+ if not isinstance(loss, HalfSquaredError):
187
+ # mask all which are not in sample mask.
188
+ masked_terminal_regions = terminal_regions.copy()
189
+ masked_terminal_regions[~sample_mask] = -1
190
+
191
+ if isinstance(loss, HalfBinomialLoss):
192
+
193
+ def compute_update(y_, indices, neg_gradient, raw_prediction, k):
194
+ # Make a single Newton-Raphson step, see "Additive Logistic Regression:
195
+ # A Statistical View of Boosting" FHT00 and note that we use a slightly
196
+ # different version (factor 2) of "F" with proba=expit(raw_prediction).
197
+ # Our node estimate is given by:
198
+ # sum(w * (y - prob)) / sum(w * prob * (1 - prob))
199
+ # we take advantage that: y - prob = neg_gradient
200
+ neg_g = neg_gradient.take(indices, axis=0)
201
+ prob = y_ - neg_g
202
+ # numerator = negative gradient = y - prob
203
+ numerator = np.average(neg_g, weights=sw)
204
+ # denominator = hessian = prob * (1 - prob)
205
+ denominator = np.average(prob * (1 - prob), weights=sw)
206
+ return _safe_divide(numerator, denominator)
207
+
208
+ elif isinstance(loss, HalfMultinomialLoss):
209
+
210
+ def compute_update(y_, indices, neg_gradient, raw_prediction, k):
211
+ # we take advantage that: y - prob = neg_gradient
212
+ neg_g = neg_gradient.take(indices, axis=0)
213
+ prob = y_ - neg_g
214
+ K = loss.n_classes
215
+ # numerator = negative gradient * (k - 1) / k
216
+ # Note: The factor (k - 1)/k appears in the original papers "Greedy
217
+ # Function Approximation" by Friedman and "Additive Logistic
218
+ # Regression" by Friedman, Hastie, Tibshirani. This factor is, however,
219
+ # wrong or at least arbitrary as it directly multiplies the
220
+ # learning_rate. We keep it for backward compatibility.
221
+ numerator = np.average(neg_g, weights=sw)
222
+ numerator *= (K - 1) / K
223
+ # denominator = (diagonal) hessian = prob * (1 - prob)
224
+ denominator = np.average(prob * (1 - prob), weights=sw)
225
+ return _safe_divide(numerator, denominator)
226
+
227
+ elif isinstance(loss, ExponentialLoss):
228
+
229
+ def compute_update(y_, indices, neg_gradient, raw_prediction, k):
230
+ neg_g = neg_gradient.take(indices, axis=0)
231
+ # numerator = negative gradient = y * exp(-raw) - (1-y) * exp(raw)
232
+ numerator = np.average(neg_g, weights=sw)
233
+ # denominator = hessian = y * exp(-raw) + (1-y) * exp(raw)
234
+ # if y=0: hessian = exp(raw) = -neg_g
235
+ # y=1: hessian = exp(-raw) = neg_g
236
+ hessian = neg_g.copy()
237
+ hessian[y_ == 0] *= -1
238
+ denominator = np.average(hessian, weights=sw)
239
+ return _safe_divide(numerator, denominator)
240
+
241
+ else:
242
+
243
+ def compute_update(y_, indices, neg_gradient, raw_prediction, k):
244
+ return loss.fit_intercept_only(
245
+ y_true=y_ - raw_prediction[indices, k],
246
+ sample_weight=sw,
247
+ )
248
+
249
+ # update each leaf (= perform line search)
250
+ for leaf in np.nonzero(tree.children_left == TREE_LEAF)[0]:
251
+ indices = np.nonzero(masked_terminal_regions == leaf)[
252
+ 0
253
+ ] # of terminal regions
254
+ y_ = y.take(indices, axis=0)
255
+ sw = None if sample_weight is None else sample_weight[indices]
256
+ update = compute_update(y_, indices, neg_gradient, raw_prediction, k)
257
+
258
+ # TODO: Multiply here by learning rate instead of everywhere else.
259
+ tree.value[leaf, 0, 0] = update
260
+
261
+ # update predictions (both in-bag and out-of-bag)
262
+ raw_prediction[:, k] += learning_rate * tree.value[:, 0, 0].take(
263
+ terminal_regions, axis=0
264
+ )
265
+
266
+
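To make the binomial Newton step above concrete, here is a small numeric sketch for a hypothetical, unweighted leaf holding three samples, using numerator = mean(y - p) and denominator = mean(p * (1 - p)):

    import numpy as np

    y = np.array([1.0, 1.0, 0.0])         # true labels in the leaf
    p = np.array([0.6, 0.7, 0.4])         # current predicted probabilities
    neg_gradient = y - p                  # [0.4, 0.3, -0.4]

    numerator = neg_gradient.mean()       # 0.1
    denominator = (p * (1 - p)).mean()    # (0.24 + 0.21 + 0.24) / 3 = 0.23
    leaf_value = numerator / denominator  # ~0.435, before learning_rate shrinkage
    print(leaf_value)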
267
+ def set_huber_delta(loss, y_true, raw_prediction, sample_weight=None):
268
+ """Calculate and set self.closs.delta based on self.quantile."""
269
+ abserr = np.abs(y_true - raw_prediction.squeeze())
270
+ # sample_weight is always a ndarray, never None.
271
+ delta = _weighted_percentile(abserr, sample_weight, 100 * loss.quantile)
272
+ loss.closs.delta = float(delta)
273
+
274
+
275
+ class VerboseReporter:
276
+ """Reports verbose output to stdout.
277
+
278
+ Parameters
279
+ ----------
280
+ verbose : int
281
+ Verbosity level. If ``verbose==1`` output is printed once in a while
282
+ (when iteration mod verbose_mod is zero).; if larger than 1 then output
283
+ is printed for each update.
284
+ """
285
+
286
+ def __init__(self, verbose):
287
+ self.verbose = verbose
288
+
289
+ def init(self, est, begin_at_stage=0):
290
+ """Initialize reporter
291
+
292
+ Parameters
293
+ ----------
294
+ est : Estimator
295
+ The estimator
296
+
297
+ begin_at_stage : int, default=0
298
+ stage at which to begin reporting
299
+ """
300
+ # header fields and line format str
301
+ header_fields = ["Iter", "Train Loss"]
302
+ verbose_fmt = ["{iter:>10d}", "{train_score:>16.4f}"]
303
+ # do oob?
304
+ if est.subsample < 1:
305
+ header_fields.append("OOB Improve")
306
+ verbose_fmt.append("{oob_impr:>16.4f}")
307
+ header_fields.append("Remaining Time")
308
+ verbose_fmt.append("{remaining_time:>16s}")
309
+
310
+ # print the header line
311
+ print(("%10s " + "%16s " * (len(header_fields) - 1)) % tuple(header_fields))
312
+
313
+ self.verbose_fmt = " ".join(verbose_fmt)
314
+ # plot verbose info each time i % verbose_mod == 0
315
+ self.verbose_mod = 1
316
+ self.start_time = time()
317
+ self.begin_at_stage = begin_at_stage
318
+
319
+ def update(self, j, est):
320
+ """Update reporter with new iteration.
321
+
322
+ Parameters
323
+ ----------
324
+ j : int
325
+ The new iteration.
326
+ est : Estimator
327
+ The estimator.
328
+ """
329
+ do_oob = est.subsample < 1
330
+ # we need to take into account if we fit additional estimators.
331
+ i = j - self.begin_at_stage # iteration relative to the start iter
332
+ if (i + 1) % self.verbose_mod == 0:
333
+ oob_impr = est.oob_improvement_[j] if do_oob else 0
334
+ remaining_time = (
335
+ (est.n_estimators - (j + 1)) * (time() - self.start_time) / float(i + 1)
336
+ )
337
+ if remaining_time > 60:
338
+ remaining_time = "{0:.2f}m".format(remaining_time / 60.0)
339
+ else:
340
+ remaining_time = "{0:.2f}s".format(remaining_time)
341
+ print(
342
+ self.verbose_fmt.format(
343
+ iter=j + 1,
344
+ train_score=est.train_score_[j],
345
+ oob_impr=oob_impr,
346
+ remaining_time=remaining_time,
347
+ )
348
+ )
349
+ if self.verbose == 1 and ((i + 1) // (self.verbose_mod * 10) > 0):
350
+ # adjust verbose frequency (powers of 10)
351
+ self.verbose_mod *= 10
352
+
353
+
354
+ class BaseGradientBoosting(BaseEnsemble, metaclass=ABCMeta):
355
+ """Abstract base class for Gradient Boosting."""
356
+
357
+ _parameter_constraints: dict = {
358
+ **DecisionTreeRegressor._parameter_constraints,
359
+ "learning_rate": [Interval(Real, 0.0, None, closed="left")],
360
+ "n_estimators": [Interval(Integral, 1, None, closed="left")],
361
+ "criterion": [StrOptions({"friedman_mse", "squared_error"})],
362
+ "subsample": [Interval(Real, 0.0, 1.0, closed="right")],
363
+ "verbose": ["verbose"],
364
+ "warm_start": ["boolean"],
365
+ "validation_fraction": [Interval(Real, 0.0, 1.0, closed="neither")],
366
+ "n_iter_no_change": [Interval(Integral, 1, None, closed="left"), None],
367
+ "tol": [Interval(Real, 0.0, None, closed="left")],
368
+ }
369
+ _parameter_constraints.pop("splitter")
370
+ _parameter_constraints.pop("monotonic_cst")
371
+
372
+ @abstractmethod
373
+ def __init__(
374
+ self,
375
+ *,
376
+ loss,
377
+ learning_rate,
378
+ n_estimators,
379
+ criterion,
380
+ min_samples_split,
381
+ min_samples_leaf,
382
+ min_weight_fraction_leaf,
383
+ max_depth,
384
+ min_impurity_decrease,
385
+ init,
386
+ subsample,
387
+ max_features,
388
+ ccp_alpha,
389
+ random_state,
390
+ alpha=0.9,
391
+ verbose=0,
392
+ max_leaf_nodes=None,
393
+ warm_start=False,
394
+ validation_fraction=0.1,
395
+ n_iter_no_change=None,
396
+ tol=1e-4,
397
+ ):
398
+ self.n_estimators = n_estimators
399
+ self.learning_rate = learning_rate
400
+ self.loss = loss
401
+ self.criterion = criterion
402
+ self.min_samples_split = min_samples_split
403
+ self.min_samples_leaf = min_samples_leaf
404
+ self.min_weight_fraction_leaf = min_weight_fraction_leaf
405
+ self.subsample = subsample
406
+ self.max_features = max_features
407
+ self.max_depth = max_depth
408
+ self.min_impurity_decrease = min_impurity_decrease
409
+ self.ccp_alpha = ccp_alpha
410
+ self.init = init
411
+ self.random_state = random_state
412
+ self.alpha = alpha
413
+ self.verbose = verbose
414
+ self.max_leaf_nodes = max_leaf_nodes
415
+ self.warm_start = warm_start
416
+ self.validation_fraction = validation_fraction
417
+ self.n_iter_no_change = n_iter_no_change
418
+ self.tol = tol
419
+
420
+ @abstractmethod
421
+ def _encode_y(self, y=None, sample_weight=None):
422
+ """Called by fit to validate and encode y."""
423
+
424
+ @abstractmethod
425
+ def _get_loss(self, sample_weight):
426
+ """Get loss object from sklearn._loss.loss."""
427
+
428
+ def _fit_stage(
429
+ self,
430
+ i,
431
+ X,
432
+ y,
433
+ raw_predictions,
434
+ sample_weight,
435
+ sample_mask,
436
+ random_state,
437
+ X_csc=None,
438
+ X_csr=None,
439
+ ):
440
+ """Fit another stage of ``n_trees_per_iteration_`` trees."""
441
+ original_y = y
442
+
443
+ if isinstance(self._loss, HuberLoss):
444
+ set_huber_delta(
445
+ loss=self._loss,
446
+ y_true=y,
447
+ raw_prediction=raw_predictions,
448
+ sample_weight=sample_weight,
449
+ )
450
+ # TODO: Without oob, i.e. with self.subsample = 1.0, we could call
451
+ # self._loss.loss_gradient and use it to set train_score_.
452
+ # But note that train_score_[i] is the score AFTER fitting the i-th tree.
453
+ # Note: We need the negative gradient!
454
+ neg_gradient = -self._loss.gradient(
455
+ y_true=y,
456
+ raw_prediction=raw_predictions,
457
+ sample_weight=None, # We pass sample_weights to the tree directly.
458
+ )
459
+ # 2-d views of shape (n_samples, n_trees_per_iteration_) or (n_samples, 1)
460
+ # on neg_gradient to simplify the loop over n_trees_per_iteration_.
461
+ if neg_gradient.ndim == 1:
462
+ neg_g_view = neg_gradient.reshape((-1, 1))
463
+ else:
464
+ neg_g_view = neg_gradient
465
+
466
+ for k in range(self.n_trees_per_iteration_):
467
+ if self._loss.is_multiclass:
468
+ y = np.array(original_y == k, dtype=np.float64)
469
+
470
+ # induce regression tree on the negative gradient
471
+ tree = DecisionTreeRegressor(
472
+ criterion=self.criterion,
473
+ splitter="best",
474
+ max_depth=self.max_depth,
475
+ min_samples_split=self.min_samples_split,
476
+ min_samples_leaf=self.min_samples_leaf,
477
+ min_weight_fraction_leaf=self.min_weight_fraction_leaf,
478
+ min_impurity_decrease=self.min_impurity_decrease,
479
+ max_features=self.max_features,
480
+ max_leaf_nodes=self.max_leaf_nodes,
481
+ random_state=random_state,
482
+ ccp_alpha=self.ccp_alpha,
483
+ )
484
+
485
+ if self.subsample < 1.0:
486
+ # no inplace multiplication!
487
+ sample_weight = sample_weight * sample_mask.astype(np.float64)
488
+
489
+ X = X_csc if X_csc is not None else X
490
+ tree.fit(
491
+ X, neg_g_view[:, k], sample_weight=sample_weight, check_input=False
492
+ )
493
+
494
+ # update tree leaves
495
+ X_for_tree_update = X_csr if X_csr is not None else X
496
+ _update_terminal_regions(
497
+ self._loss,
498
+ tree.tree_,
499
+ X_for_tree_update,
500
+ y,
501
+ neg_g_view[:, k],
502
+ raw_predictions,
503
+ sample_weight,
504
+ sample_mask,
505
+ learning_rate=self.learning_rate,
506
+ k=k,
507
+ )
508
+
509
+ # add tree to ensemble
510
+ self.estimators_[i, k] = tree
511
+
512
+ return raw_predictions
513
+
514
+ def _set_max_features(self):
515
+ """Set self.max_features_."""
516
+ if isinstance(self.max_features, str):
517
+ if self.max_features == "auto":
518
+ if is_classifier(self):
519
+ max_features = max(1, int(np.sqrt(self.n_features_in_)))
520
+ else:
521
+ max_features = self.n_features_in_
522
+ elif self.max_features == "sqrt":
523
+ max_features = max(1, int(np.sqrt(self.n_features_in_)))
524
+ else: # self.max_features == "log2"
525
+ max_features = max(1, int(np.log2(self.n_features_in_)))
526
+ elif self.max_features is None:
527
+ max_features = self.n_features_in_
528
+ elif isinstance(self.max_features, Integral):
529
+ max_features = self.max_features
530
+ else: # float
531
+ max_features = max(1, int(self.max_features * self.n_features_in_))
532
+
533
+ self.max_features_ = max_features
534
+
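A quick standalone sketch of how the branches above resolve for, say, 20 input features (mirroring the logic rather than calling the private method):

    import numpy as np

    n_features_in_ = 20
    print(max(1, int(np.sqrt(n_features_in_))))  # "sqrt" -> 4
    print(max(1, int(np.log2(n_features_in_))))  # "log2" -> 4
    print(max(1, int(0.3 * n_features_in_)))     # float 0.3 -> 6
    print(n_features_in_)                        # None -> 20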
535
+ def _init_state(self):
536
+ """Initialize model state and allocate model state data structures."""
537
+
538
+ self.init_ = self.init
539
+ if self.init_ is None:
540
+ if is_classifier(self):
541
+ self.init_ = DummyClassifier(strategy="prior")
542
+ elif isinstance(self._loss, (AbsoluteError, HuberLoss)):
543
+ self.init_ = DummyRegressor(strategy="quantile", quantile=0.5)
544
+ elif isinstance(self._loss, PinballLoss):
545
+ self.init_ = DummyRegressor(strategy="quantile", quantile=self.alpha)
546
+ else:
547
+ self.init_ = DummyRegressor(strategy="mean")
548
+
549
+ self.estimators_ = np.empty(
550
+ (self.n_estimators, self.n_trees_per_iteration_), dtype=object
551
+ )
552
+ self.train_score_ = np.zeros((self.n_estimators,), dtype=np.float64)
553
+ # do oob?
554
+ if self.subsample < 1.0:
555
+ self.oob_improvement_ = np.zeros((self.n_estimators), dtype=np.float64)
556
+ self.oob_scores_ = np.zeros((self.n_estimators), dtype=np.float64)
557
+ self.oob_score_ = np.nan
558
+
559
+ def _clear_state(self):
560
+ """Clear the state of the gradient boosting model."""
561
+ if hasattr(self, "estimators_"):
562
+ self.estimators_ = np.empty((0, 0), dtype=object)
563
+ if hasattr(self, "train_score_"):
564
+ del self.train_score_
565
+ if hasattr(self, "oob_improvement_"):
566
+ del self.oob_improvement_
567
+ if hasattr(self, "oob_scores_"):
568
+ del self.oob_scores_
569
+ if hasattr(self, "oob_score_"):
570
+ del self.oob_score_
571
+ if hasattr(self, "init_"):
572
+ del self.init_
573
+ if hasattr(self, "_rng"):
574
+ del self._rng
575
+
576
+ def _resize_state(self):
577
+ """Add additional ``n_estimators`` entries to all attributes."""
578
+ # self.n_estimators is the number of additional est to fit
579
+ total_n_estimators = self.n_estimators
580
+ if total_n_estimators < self.estimators_.shape[0]:
581
+ raise ValueError(
582
+ "resize with smaller n_estimators %d < %d"
583
+ % (total_n_estimators, self.estimators_.shape[0])
584
+ )
585
+
586
+ self.estimators_ = np.resize(
587
+ self.estimators_, (total_n_estimators, self.n_trees_per_iteration_)
588
+ )
589
+ self.train_score_ = np.resize(self.train_score_, total_n_estimators)
590
+ if self.subsample < 1 or hasattr(self, "oob_improvement_"):
591
+ # if do oob resize arrays or create new if not available
592
+ if hasattr(self, "oob_improvement_"):
593
+ self.oob_improvement_ = np.resize(
594
+ self.oob_improvement_, total_n_estimators
595
+ )
596
+ self.oob_scores_ = np.resize(self.oob_scores_, total_n_estimators)
597
+ self.oob_score_ = np.nan
598
+ else:
599
+ self.oob_improvement_ = np.zeros(
600
+ (total_n_estimators,), dtype=np.float64
601
+ )
602
+ self.oob_scores_ = np.zeros((total_n_estimators,), dtype=np.float64)
603
+ self.oob_score_ = np.nan
604
+
605
+ def _is_fitted(self):
606
+ return len(getattr(self, "estimators_", [])) > 0
607
+
608
+ def _check_initialized(self):
609
+ """Check that the estimator is initialized, raising an error if not."""
610
+ check_is_fitted(self)
611
+
612
+ @_fit_context(
613
+ # GradientBoosting*.init is not validated yet
614
+ prefer_skip_nested_validation=False
615
+ )
616
+ def fit(self, X, y, sample_weight=None, monitor=None):
617
+ """Fit the gradient boosting model.
618
+
619
+ Parameters
620
+ ----------
621
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
622
+ The input samples. Internally, it will be converted to
623
+ ``dtype=np.float32`` and if a sparse matrix is provided
624
+ to a sparse ``csr_matrix``.
625
+
626
+ y : array-like of shape (n_samples,)
627
+ Target values (strings or integers in classification, real numbers
628
+ in regression).
629
+ For classification, labels must correspond to classes.
630
+
631
+ sample_weight : array-like of shape (n_samples,), default=None
632
+ Sample weights. If None, then samples are equally weighted. Splits
633
+ that would create child nodes with net zero or negative weight are
634
+ ignored while searching for a split in each node. In the case of
635
+ classification, splits are also ignored if they would result in any
636
+ single class carrying a negative weight in either child node.
637
+
638
+ monitor : callable, default=None
639
+ The monitor is called after each iteration with the current
640
+ iteration, a reference to the estimator and the local variables of
641
+ ``_fit_stages`` as keyword arguments ``callable(i, self,
642
+ locals())``. If the callable returns ``True`` the fitting procedure
643
+ is stopped. The monitor can be used for various things such as
644
+ computing held-out estimates, early stopping, model introspection, and
645
+ snapshotting.
646
+
647
+ Returns
648
+ -------
649
+ self : object
650
+ Fitted estimator.
651
+ """
652
+ if not self.warm_start:
653
+ self._clear_state()
654
+
655
+ # Check input
656
+ # Since check_array converts both X and y to the same dtype, but the
657
+ # trees use different types for X and y, checking them separately.
658
+
659
+ X, y = self._validate_data(
660
+ X, y, accept_sparse=["csr", "csc", "coo"], dtype=DTYPE, multi_output=True
661
+ )
662
+ sample_weight_is_none = sample_weight is None
663
+ sample_weight = _check_sample_weight(sample_weight, X)
664
+ if sample_weight_is_none:
665
+ y = self._encode_y(y=y, sample_weight=None)
666
+ else:
667
+ y = self._encode_y(y=y, sample_weight=sample_weight)
668
+ y = column_or_1d(y, warn=True) # TODO: Is this still required?
669
+
670
+ self._set_max_features()
671
+
672
+ # self.loss is guaranteed to be a string
673
+ self._loss = self._get_loss(sample_weight=sample_weight)
674
+
675
+ if self.n_iter_no_change is not None:
676
+ stratify = y if is_classifier(self) else None
677
+ (
678
+ X_train,
679
+ X_val,
680
+ y_train,
681
+ y_val,
682
+ sample_weight_train,
683
+ sample_weight_val,
684
+ ) = train_test_split(
685
+ X,
686
+ y,
687
+ sample_weight,
688
+ random_state=self.random_state,
689
+ test_size=self.validation_fraction,
690
+ stratify=stratify,
691
+ )
692
+ if is_classifier(self):
693
+ if self.n_classes_ != np.unique(y_train).shape[0]:
694
+ # We choose to error here. The problem is that the init
695
+ # estimator would be trained on y, which has some missing
696
+ # classes now, so its predictions would not have the
697
+ # correct shape.
698
+ raise ValueError(
699
+ "The training data after the early stopping split "
700
+ "is missing some classes. Try using another random "
701
+ "seed."
702
+ )
703
+ else:
704
+ X_train, y_train, sample_weight_train = X, y, sample_weight
705
+ X_val = y_val = sample_weight_val = None
706
+
707
+ n_samples = X_train.shape[0]
708
+
709
+ # First time calling fit.
710
+ if not self._is_fitted():
711
+ # init state
712
+ self._init_state()
713
+
714
+ # fit initial model and initialize raw predictions
715
+ if self.init_ == "zero":
716
+ raw_predictions = np.zeros(
717
+ shape=(n_samples, self.n_trees_per_iteration_),
718
+ dtype=np.float64,
719
+ )
720
+ else:
721
+ # XXX clean this once we have a support_sample_weight tag
722
+ if sample_weight_is_none:
723
+ self.init_.fit(X_train, y_train)
724
+ else:
725
+ msg = (
726
+ "The initial estimator {} does not support sample "
727
+ "weights.".format(self.init_.__class__.__name__)
728
+ )
729
+ try:
730
+ self.init_.fit(
731
+ X_train, y_train, sample_weight=sample_weight_train
732
+ )
733
+ except TypeError as e:
734
+ if "unexpected keyword argument 'sample_weight'" in str(e):
735
+ # regular estimator without SW support
736
+ raise ValueError(msg) from e
737
+ else: # regular estimator whose input checking failed
738
+ raise
739
+ except ValueError as e:
740
+ if (
741
+ "pass parameters to specific steps of "
742
+ "your pipeline using the "
743
+ "stepname__parameter"
744
+ in str(e)
745
+ ): # pipeline
746
+ raise ValueError(msg) from e
747
+ else: # regular estimator whose input checking failed
748
+ raise
749
+
750
+ raw_predictions = _init_raw_predictions(
751
+ X_train, self.init_, self._loss, is_classifier(self)
752
+ )
753
+
754
+ begin_at_stage = 0
755
+
756
+ # The rng state must be preserved if warm_start is True
757
+ self._rng = check_random_state(self.random_state)
758
+
759
+ # warm start: this is not the first time fit was called
760
+ else:
761
+ # add more estimators to fitted model
762
+ # invariant: warm_start = True
763
+ if self.n_estimators < self.estimators_.shape[0]:
764
+ raise ValueError(
765
+ "n_estimators=%d must be larger or equal to "
766
+ "estimators_.shape[0]=%d when "
767
+ "warm_start==True" % (self.n_estimators, self.estimators_.shape[0])
768
+ )
769
+ begin_at_stage = self.estimators_.shape[0]
770
+ # The requirements of _raw_predict
771
+ # are more constrained than fit. It accepts only CSR
772
+ # matrices. Finite values have already been checked in _validate_data.
773
+ X_train = check_array(
774
+ X_train,
775
+ dtype=DTYPE,
776
+ order="C",
777
+ accept_sparse="csr",
778
+ force_all_finite=False,
779
+ )
780
+ raw_predictions = self._raw_predict(X_train)
781
+ self._resize_state()
782
+
783
+ # fit the boosting stages
784
+ n_stages = self._fit_stages(
785
+ X_train,
786
+ y_train,
787
+ raw_predictions,
788
+ sample_weight_train,
789
+ self._rng,
790
+ X_val,
791
+ y_val,
792
+ sample_weight_val,
793
+ begin_at_stage,
794
+ monitor,
795
+ )
796
+
797
+ # change shape of arrays after fit (early-stopping or additional ests)
798
+ if n_stages != self.estimators_.shape[0]:
799
+ self.estimators_ = self.estimators_[:n_stages]
800
+ self.train_score_ = self.train_score_[:n_stages]
801
+ if hasattr(self, "oob_improvement_"):
802
+ # OOB scores were computed
803
+ self.oob_improvement_ = self.oob_improvement_[:n_stages]
804
+ self.oob_scores_ = self.oob_scores_[:n_stages]
805
+ self.oob_score_ = self.oob_scores_[-1]
806
+ self.n_estimators_ = n_stages
807
+ return self
808
+
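A small hedged usage sketch of the ``monitor`` hook documented in the ``fit`` docstring above (the callback name ``stop_after_50`` and the synthetic dataset are illustrative, not part of the source): the callback receives ``(i, self, locals())`` after every stage and fitting stops as soon as it returns True.

    from sklearn.datasets import make_regression
    from sklearn.ensemble import GradientBoostingRegressor

    X, y = make_regression(n_samples=500, n_features=10, random_state=0)

    def stop_after_50(i, est, local_vars):
        # `local_vars` exposes the internals of `_fit_stages` at stage i;
        # est.train_score_[i] is already filled in when the monitor runs.
        return i >= 49  # request early stop after 50 stages

    reg = GradientBoostingRegressor(n_estimators=500, random_state=0)
    reg.fit(X, y, monitor=stop_after_50)
    print(reg.n_estimators_)  # 50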
809
+ def _fit_stages(
810
+ self,
811
+ X,
812
+ y,
813
+ raw_predictions,
814
+ sample_weight,
815
+ random_state,
816
+ X_val,
817
+ y_val,
818
+ sample_weight_val,
819
+ begin_at_stage=0,
820
+ monitor=None,
821
+ ):
822
+ """Iteratively fits the stages.
823
+
824
+ For each stage it computes the progress (OOB, train score)
825
+ and delegates to ``_fit_stage``.
826
+ Returns the number of stages fit; might differ from ``n_estimators``
827
+ due to early stopping.
828
+ """
829
+ n_samples = X.shape[0]
830
+ do_oob = self.subsample < 1.0
831
+ sample_mask = np.ones((n_samples,), dtype=bool)
832
+ n_inbag = max(1, int(self.subsample * n_samples))
833
+
834
+ if self.verbose:
835
+ verbose_reporter = VerboseReporter(verbose=self.verbose)
836
+ verbose_reporter.init(self, begin_at_stage)
837
+
838
+ X_csc = csc_matrix(X) if issparse(X) else None
839
+ X_csr = csr_matrix(X) if issparse(X) else None
840
+
841
+ if self.n_iter_no_change is not None:
842
+ loss_history = np.full(self.n_iter_no_change, np.inf)
843
+ # We create a generator to get the predictions for X_val after
844
+ # the addition of each successive stage
845
+ y_val_pred_iter = self._staged_raw_predict(X_val, check_input=False)
846
+
847
+ # Older versions of GBT had its own loss functions. With the new common
848
+ # private loss function submodule _loss, we often are a factor of 2
849
+ # away from the old version. Here we keep backward compatibility for
850
+ # oob_scores_ and oob_improvement_, even if the old way is quite
851
+ inconsistent (sometimes the old loss differed by a factor of 2, sometimes
852
+ # not).
853
+ if isinstance(
854
+ self._loss,
855
+ (
856
+ HalfSquaredError,
857
+ HalfBinomialLoss,
858
+ ),
859
+ ):
860
+ factor = 2
861
+ else:
862
+ factor = 1
863
+
864
+ # perform boosting iterations
865
+ i = begin_at_stage
866
+ for i in range(begin_at_stage, self.n_estimators):
867
+ # subsampling
868
+ if do_oob:
869
+ sample_mask = _random_sample_mask(n_samples, n_inbag, random_state)
870
+ y_oob_masked = y[~sample_mask]
871
+ sample_weight_oob_masked = sample_weight[~sample_mask]
872
+ if i == 0: # store the initial loss to compute the OOB score
873
+ initial_loss = factor * self._loss(
874
+ y_true=y_oob_masked,
875
+ raw_prediction=raw_predictions[~sample_mask],
876
+ sample_weight=sample_weight_oob_masked,
877
+ )
878
+
879
+ # fit next stage of trees
880
+ raw_predictions = self._fit_stage(
881
+ i,
882
+ X,
883
+ y,
884
+ raw_predictions,
885
+ sample_weight,
886
+ sample_mask,
887
+ random_state,
888
+ X_csc=X_csc,
889
+ X_csr=X_csr,
890
+ )
891
+
892
+ # track loss
893
+ if do_oob:
894
+ self.train_score_[i] = factor * self._loss(
895
+ y_true=y[sample_mask],
896
+ raw_prediction=raw_predictions[sample_mask],
897
+ sample_weight=sample_weight[sample_mask],
898
+ )
899
+ self.oob_scores_[i] = factor * self._loss(
900
+ y_true=y_oob_masked,
901
+ raw_prediction=raw_predictions[~sample_mask],
902
+ sample_weight=sample_weight_oob_masked,
903
+ )
904
+ previous_loss = initial_loss if i == 0 else self.oob_scores_[i - 1]
905
+ self.oob_improvement_[i] = previous_loss - self.oob_scores_[i]
906
+ self.oob_score_ = self.oob_scores_[-1]
907
+ else:
908
+ # no need to fancy index w/ no subsampling
909
+ self.train_score_[i] = factor * self._loss(
910
+ y_true=y,
911
+ raw_prediction=raw_predictions,
912
+ sample_weight=sample_weight,
913
+ )
914
+
915
+ if self.verbose > 0:
916
+ verbose_reporter.update(i, self)
917
+
918
+ if monitor is not None:
919
+ early_stopping = monitor(i, self, locals())
920
+ if early_stopping:
921
+ break
922
+
923
+ # We also provide an early stopping based on the score from
924
+ # validation set (X_val, y_val), if n_iter_no_change is set
925
+ if self.n_iter_no_change is not None:
926
+ # By calling next(y_val_pred_iter), we get the predictions
927
+ # for X_val after the addition of the current stage
928
+ validation_loss = factor * self._loss(
929
+ y_val, next(y_val_pred_iter), sample_weight_val
930
+ )
931
+
932
+ # Require validation_score to be better (less) than at least
933
+ # one of the last n_iter_no_change evaluations
934
+ if np.any(validation_loss + self.tol < loss_history):
935
+ loss_history[i % len(loss_history)] = validation_loss
936
+ else:
937
+ break
938
+
939
+ return i + 1
940
+
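The OOB bookkeeping above only happens when ``subsample < 1.0``. A brief hedged example of reading it back through the public attributes (assuming scikit-learn >= 1.3 for ``oob_scores_`` and ``oob_score_``; the synthetic data is illustrative):

    from sklearn.datasets import make_classification
    from sklearn.ensemble import GradientBoostingClassifier

    X, y = make_classification(n_samples=1000, random_state=0)
    clf = GradientBoostingClassifier(
        n_estimators=100, subsample=0.5, random_state=0
    ).fit(X, y)

    print(clf.oob_scores_.shape)     # (100,): OOB loss after each stage
    print(clf.oob_improvement_[:3])  # per-stage improvement of the OOB loss
    print(clf.oob_score_)            # equals clf.oob_scores_[-1]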
941
+ def _make_estimator(self, append=True):
942
+ # we don't need _make_estimator
943
+ raise NotImplementedError()
944
+
945
+ def _raw_predict_init(self, X):
946
+ """Check input and compute raw predictions of the init estimator."""
947
+ self._check_initialized()
948
+ X = self.estimators_[0, 0]._validate_X_predict(X, check_input=True)
949
+ if self.init_ == "zero":
950
+ raw_predictions = np.zeros(
951
+ shape=(X.shape[0], self.n_trees_per_iteration_), dtype=np.float64
952
+ )
953
+ else:
954
+ raw_predictions = _init_raw_predictions(
955
+ X, self.init_, self._loss, is_classifier(self)
956
+ )
957
+ return raw_predictions
958
+
959
+ def _raw_predict(self, X):
960
+ """Return the sum of the trees raw predictions (+ init estimator)."""
961
+ check_is_fitted(self)
962
+ raw_predictions = self._raw_predict_init(X)
963
+ predict_stages(self.estimators_, X, self.learning_rate, raw_predictions)
964
+ return raw_predictions
965
+
966
+ def _staged_raw_predict(self, X, check_input=True):
967
+ """Compute raw predictions of ``X`` for each iteration.
968
+
969
+ This method allows monitoring (i.e. determine error on testing set)
970
+ after each stage.
971
+
972
+ Parameters
973
+ ----------
974
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
975
+ The input samples. Internally, it will be converted to
976
+ ``dtype=np.float32`` and if a sparse matrix is provided
977
+ to a sparse ``csr_matrix``.
978
+
979
+ check_input : bool, default=True
980
+ If False, the input arrays X will not be checked.
981
+
982
+ Returns
983
+ -------
984
+ raw_predictions : generator of ndarray of shape (n_samples, k)
985
+ The raw predictions of the input samples. The order of the
986
+ classes corresponds to that in the attribute :term:`classes_`.
987
+ Regression and binary classification are special cases with
988
+ ``k == 1``, otherwise ``k==n_classes``.
989
+ """
990
+ if check_input:
991
+ X = self._validate_data(
992
+ X, dtype=DTYPE, order="C", accept_sparse="csr", reset=False
993
+ )
994
+ raw_predictions = self._raw_predict_init(X)
995
+ for i in range(self.estimators_.shape[0]):
996
+ predict_stage(self.estimators_, i, X, self.learning_rate, raw_predictions)
997
+ yield raw_predictions.copy()
998
+
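Publicly, this staged machinery is exposed as ``staged_predict`` / ``staged_predict_proba`` / ``staged_decision_function``. A short hedged sketch (synthetic data, illustrative sizes) of using it to pick the number of stages with the lowest held-out log loss without refitting:

    import numpy as np
    from sklearn.datasets import make_classification
    from sklearn.ensemble import GradientBoostingClassifier
    from sklearn.metrics import log_loss
    from sklearn.model_selection import train_test_split

    X, y = make_classification(n_samples=2000, random_state=0)
    X_tr, X_te, y_tr, y_te = train_test_split(X, y, random_state=0)

    clf = GradientBoostingClassifier(n_estimators=200, random_state=0).fit(X_tr, y_tr)

    # One probability matrix per boosting stage, computed incrementally.
    test_losses = [log_loss(y_te, proba) for proba in clf.staged_predict_proba(X_te)]
    best_n = int(np.argmin(test_losses)) + 1
    print(best_n)  # stage count with the lowest held-out log loss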
999
+ @property
1000
+ def feature_importances_(self):
1001
+ """The impurity-based feature importances.
1002
+
1003
+ The higher, the more important the feature.
1004
+ The importance of a feature is computed as the (normalized)
1005
+ total reduction of the criterion brought by that feature. It is also
1006
+ known as the Gini importance.
1007
+
1008
+ Warning: impurity-based feature importances can be misleading for
1009
+ high cardinality features (many unique values). See
1010
+ :func:`sklearn.inspection.permutation_importance` as an alternative.
1011
+
1012
+ Returns
1013
+ -------
1014
+ feature_importances_ : ndarray of shape (n_features,)
1015
+ The values of this array sum to 1, unless all trees are single node
1016
+ trees consisting of only the root node, in which case it will be an
1017
+ array of zeros.
1018
+ """
1019
+ self._check_initialized()
1020
+
1021
+ relevant_trees = [
1022
+ tree
1023
+ for stage in self.estimators_
1024
+ for tree in stage
1025
+ if tree.tree_.node_count > 1
1026
+ ]
1027
+ if not relevant_trees:
1028
+ # degenerate case where all trees have only one node
1029
+ return np.zeros(shape=self.n_features_in_, dtype=np.float64)
1030
+
1031
+ relevant_feature_importances = [
1032
+ tree.tree_.compute_feature_importances(normalize=False)
1033
+ for tree in relevant_trees
1034
+ ]
1035
+ avg_feature_importances = np.mean(
1036
+ relevant_feature_importances, axis=0, dtype=np.float64
1037
+ )
1038
+ return avg_feature_importances / np.sum(avg_feature_importances)
1039
+
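A short usage sketch of the property above, together with the permutation-based alternative that the warning recommends (synthetic data, illustrative parameters):

    from sklearn.datasets import make_classification
    from sklearn.ensemble import GradientBoostingClassifier
    from sklearn.inspection import permutation_importance

    X, y = make_classification(n_samples=500, n_features=8, random_state=0)
    clf = GradientBoostingClassifier(random_state=0).fit(X, y)

    print(clf.feature_importances_.sum())  # ~1.0 unless every tree is a lone root node
    result = permutation_importance(clf, X, y, n_repeats=5, random_state=0)
    print(result.importances_mean)         # model-agnostic alternative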
1040
+ def _compute_partial_dependence_recursion(self, grid, target_features):
1041
+ """Fast partial dependence computation.
1042
+
1043
+ Parameters
1044
+ ----------
1045
+ grid : ndarray of shape (n_samples, n_target_features)
1046
+ The grid points on which the partial dependence should be
1047
+ evaluated.
1048
+ target_features : ndarray of shape (n_target_features,)
1049
+ The set of target features for which the partial dependence
1050
+ should be evaluated.
1051
+
1052
+ Returns
1053
+ -------
1054
+ averaged_predictions : ndarray of shape \
1055
+ (n_trees_per_iteration_, n_samples)
1056
+ The value of the partial dependence function on each grid point.
1057
+ """
1058
+ if self.init is not None:
1059
+ warnings.warn(
1060
+ "Using recursion method with a non-constant init predictor "
1061
+ "will lead to incorrect partial dependence values. "
1062
+ "Got init=%s."
1063
+ % self.init,
1064
+ UserWarning,
1065
+ )
1066
+ grid = np.asarray(grid, dtype=DTYPE, order="C")
1067
+ n_estimators, n_trees_per_stage = self.estimators_.shape
1068
+ averaged_predictions = np.zeros(
1069
+ (n_trees_per_stage, grid.shape[0]), dtype=np.float64, order="C"
1070
+ )
1071
+ for stage in range(n_estimators):
1072
+ for k in range(n_trees_per_stage):
1073
+ tree = self.estimators_[stage, k].tree_
1074
+ tree.compute_partial_dependence(
1075
+ grid, target_features, averaged_predictions[k]
1076
+ )
1077
+ averaged_predictions *= self.learning_rate
1078
+
1079
+ return averaged_predictions
1080
+
1081
+ def apply(self, X):
1082
+ """Apply trees in the ensemble to X, return leaf indices.
1083
+
1084
+ .. versionadded:: 0.17
1085
+
1086
+ Parameters
1087
+ ----------
1088
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
1089
+ The input samples. Internally, its dtype will be converted to
1090
+ ``dtype=np.float32``. If a sparse matrix is provided, it will
1091
+ be converted to a sparse ``csr_matrix``.
1092
+
1093
+ Returns
1094
+ -------
1095
+ X_leaves : array-like of shape (n_samples, n_estimators, n_classes)
1096
+ For each datapoint x in X and for each tree in the ensemble,
1097
+ return the index of the leaf x ends up in each estimator.
1098
+ In the case of binary classification n_classes is 1.
1099
+ """
1100
+
1101
+ self._check_initialized()
1102
+ X = self.estimators_[0, 0]._validate_X_predict(X, check_input=True)
1103
+
1104
+ # n_classes will be equal to 1 in the binary classification or the
1105
+ # regression case.
1106
+ n_estimators, n_classes = self.estimators_.shape
1107
+ leaves = np.zeros((X.shape[0], n_estimators, n_classes))
1108
+
1109
+ for i in range(n_estimators):
1110
+ for j in range(n_classes):
1111
+ estimator = self.estimators_[i, j]
1112
+ leaves[:, i, j] = estimator.apply(X, check_input=False)
1113
+
1114
+ return leaves
1115
+
1116
+
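A small example of the leaf-index output described above; with binary classification the trailing dimension is 1 (synthetic data, illustrative sizes):

    from sklearn.datasets import make_classification
    from sklearn.ensemble import GradientBoostingClassifier

    X, y = make_classification(n_samples=200, random_state=0)
    clf = GradientBoostingClassifier(n_estimators=10, random_state=0).fit(X, y)

    leaves = clf.apply(X)
    print(leaves.shape)  # (200, 10, 1): one leaf index per sample, stage and tree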
1117
+ class GradientBoostingClassifier(ClassifierMixin, BaseGradientBoosting):
1118
+ """Gradient Boosting for classification.
1119
+
1120
+ This algorithm builds an additive model in a forward stage-wise fashion; it
1121
+ allows for the optimization of arbitrary differentiable loss functions. In
1122
+ each stage ``n_classes_`` regression trees are fit on the negative gradient
1123
+ of the loss function, e.g. binary or multiclass log loss. Binary
1124
+ classification is a special case where only a single regression tree is
1125
+ induced.
1126
+
1127
+ :class:`sklearn.ensemble.HistGradientBoostingClassifier` is a much faster
1128
+ variant of this algorithm for intermediate datasets (`n_samples >= 10_000`).
1129
+
1130
+ Read more in the :ref:`User Guide <gradient_boosting>`.
1131
+
1132
+ Parameters
1133
+ ----------
1134
+ loss : {'log_loss', 'exponential'}, default='log_loss'
1135
+ The loss function to be optimized. 'log_loss' refers to binomial and
1136
+ multinomial deviance, the same as used in logistic regression.
1137
+ It is a good choice for classification with probabilistic outputs.
1138
+ For loss 'exponential', gradient boosting recovers the AdaBoost algorithm.
1139
+
1140
+ learning_rate : float, default=0.1
1141
+ Learning rate shrinks the contribution of each tree by `learning_rate`.
1142
+ There is a trade-off between learning_rate and n_estimators.
1143
+ Values must be in the range `[0.0, inf)`.
1144
+
1145
+ n_estimators : int, default=100
1146
+ The number of boosting stages to perform. Gradient boosting
1147
+ is fairly robust to over-fitting so a large number usually
1148
+ results in better performance.
1149
+ Values must be in the range `[1, inf)`.
1150
+
1151
+ subsample : float, default=1.0
1152
+ The fraction of samples to be used for fitting the individual base
1153
+ learners. If smaller than 1.0 this results in Stochastic Gradient
1154
+ Boosting. `subsample` interacts with the parameter `n_estimators`.
1155
+ Choosing `subsample < 1.0` leads to a reduction of variance
1156
+ and an increase in bias.
1157
+ Values must be in the range `(0.0, 1.0]`.
1158
+
1159
+ criterion : {'friedman_mse', 'squared_error'}, default='friedman_mse'
1160
+ The function to measure the quality of a split. Supported criteria are
1161
+ 'friedman_mse' for the mean squared error with improvement score by
1162
+ Friedman, 'squared_error' for mean squared error. The default value of
1163
+ 'friedman_mse' is generally the best as it can provide a better
1164
+ approximation in some cases.
1165
+
1166
+ .. versionadded:: 0.18
1167
+
1168
+ min_samples_split : int or float, default=2
1169
+ The minimum number of samples required to split an internal node:
1170
+
1171
+ - If int, values must be in the range `[2, inf)`.
1172
+ - If float, values must be in the range `(0.0, 1.0]` and `min_samples_split`
1173
+ will be `ceil(min_samples_split * n_samples)`.
1174
+
1175
+ .. versionchanged:: 0.18
1176
+ Added float values for fractions.
1177
+
1178
+ min_samples_leaf : int or float, default=1
1179
+ The minimum number of samples required to be at a leaf node.
1180
+ A split point at any depth will only be considered if it leaves at
1181
+ least ``min_samples_leaf`` training samples in each of the left and
1182
+ right branches. This may have the effect of smoothing the model,
1183
+ especially in regression.
1184
+
1185
+ - If int, values must be in the range `[1, inf)`.
1186
+ - If float, values must be in the range `(0.0, 1.0)` and `min_samples_leaf`
1187
+ will be `ceil(min_samples_leaf * n_samples)`.
1188
+
1189
+ .. versionchanged:: 0.18
1190
+ Added float values for fractions.
1191
+
1192
+ min_weight_fraction_leaf : float, default=0.0
1193
+ The minimum weighted fraction of the sum total of weights (of all
1194
+ the input samples) required to be at a leaf node. Samples have
1195
+ equal weight when sample_weight is not provided.
1196
+ Values must be in the range `[0.0, 0.5]`.
1197
+
1198
+ max_depth : int or None, default=3
1199
+ Maximum depth of the individual regression estimators. The maximum
1200
+ depth limits the number of nodes in the tree. Tune this parameter
1201
+ for best performance; the best value depends on the interaction
1202
+ of the input variables. If None, then nodes are expanded until
1203
+ all leaves are pure or until all leaves contain less than
1204
+ min_samples_split samples.
1205
+ If int, values must be in the range `[1, inf)`.
1206
+
1207
+ min_impurity_decrease : float, default=0.0
1208
+ A node will be split if this split induces a decrease of the impurity
1209
+ greater than or equal to this value.
1210
+ Values must be in the range `[0.0, inf)`.
1211
+
1212
+ The weighted impurity decrease equation is the following::
1213
+
1214
+ N_t / N * (impurity - N_t_R / N_t * right_impurity
1215
+ - N_t_L / N_t * left_impurity)
1216
+
1217
+ where ``N`` is the total number of samples, ``N_t`` is the number of
1218
+ samples at the current node, ``N_t_L`` is the number of samples in the
1219
+ left child, and ``N_t_R`` is the number of samples in the right child.
1220
+
1221
+ ``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,
1222
+ if ``sample_weight`` is passed.
1223
+
1224
+ .. versionadded:: 0.19
1225
+
1226
+ init : estimator or 'zero', default=None
1227
+ An estimator object that is used to compute the initial predictions.
1228
+ ``init`` has to provide :term:`fit` and :term:`predict_proba`. If
1229
+ 'zero', the initial raw predictions are set to zero. By default, a
1230
+ ``DummyEstimator`` predicting the classes priors is used.
1231
+
1232
+ random_state : int, RandomState instance or None, default=None
1233
+ Controls the random seed given to each Tree estimator at each
1234
+ boosting iteration.
1235
+ In addition, it controls the random permutation of the features at
1236
+ each split (see Notes for more details).
1237
+ It also controls the random splitting of the training data to obtain a
1238
+ validation set if `n_iter_no_change` is not None.
1239
+ Pass an int for reproducible output across multiple function calls.
1240
+ See :term:`Glossary <random_state>`.
1241
+
1242
+ max_features : {'sqrt', 'log2'}, int or float, default=None
1243
+ The number of features to consider when looking for the best split:
1244
+
1245
+ - If int, values must be in the range `[1, inf)`.
1246
+ - If float, values must be in the range `(0.0, 1.0]` and the features
1247
+ considered at each split will be `max(1, int(max_features * n_features_in_))`.
1248
+ - If 'sqrt', then `max_features=sqrt(n_features)`.
1249
+ - If 'log2', then `max_features=log2(n_features)`.
1250
+ - If None, then `max_features=n_features`.
1251
+
1252
+ Choosing `max_features < n_features` leads to a reduction of variance
1253
+ and an increase in bias.
1254
+
1255
+ Note: the search for a split does not stop until at least one
1256
+ valid partition of the node samples is found, even if it requires to
1257
+ effectively inspect more than ``max_features`` features.
1258
+
1259
+ verbose : int, default=0
1260
+ Enable verbose output. If 1 then it prints progress and performance
1261
+ once in a while (the more trees the lower the frequency). If greater
1262
+ than 1 then it prints progress and performance for every tree.
1263
+ Values must be in the range `[0, inf)`.
1264
+
1265
+ max_leaf_nodes : int, default=None
1266
+ Grow trees with ``max_leaf_nodes`` in best-first fashion.
1267
+ Best nodes are defined as relative reduction in impurity.
1268
+ Values must be in the range `[2, inf)`.
1269
+ If `None`, then unlimited number of leaf nodes.
1270
+
1271
+ warm_start : bool, default=False
1272
+ When set to ``True``, reuse the solution of the previous call to fit
1273
+ and add more estimators to the ensemble, otherwise, just erase the
1274
+ previous solution. See :term:`the Glossary <warm_start>`.
1275
+
1276
+ validation_fraction : float, default=0.1
1277
+ The proportion of training data to set aside as validation set for
1278
+ early stopping. Values must be in the range `(0.0, 1.0)`.
1279
+ Only used if ``n_iter_no_change`` is set to an integer.
1280
+
1281
+ .. versionadded:: 0.20
1282
+
1283
+ n_iter_no_change : int, default=None
1284
+ ``n_iter_no_change`` is used to decide if early stopping will be used
1285
+ to terminate training when validation score is not improving. By
1286
+ default it is set to None to disable early stopping. If set to a
1287
+ number, it will set aside ``validation_fraction`` size of the training
1288
+ data as validation and terminate training when validation score is not
1289
+ improving in all of the previous ``n_iter_no_change`` numbers of
1290
+ iterations. The split is stratified.
1291
+ Values must be in the range `[1, inf)`.
1292
+ See
1293
+ :ref:`sphx_glr_auto_examples_ensemble_plot_gradient_boosting_early_stopping.py`.
1294
+
1295
+ .. versionadded:: 0.20
1296
+
1297
+ tol : float, default=1e-4
1298
+ Tolerance for the early stopping. When the loss is not improving
1299
+ by at least tol for ``n_iter_no_change`` iterations (if set to a
1300
+ number), the training stops.
1301
+ Values must be in the range `[0.0, inf)`.
1302
+
1303
+ .. versionadded:: 0.20
1304
+
1305
+ ccp_alpha : non-negative float, default=0.0
1306
+ Complexity parameter used for Minimal Cost-Complexity Pruning. The
1307
+ subtree with the largest cost complexity that is smaller than
1308
+ ``ccp_alpha`` will be chosen. By default, no pruning is performed.
1309
+ Values must be in the range `[0.0, inf)`.
1310
+ See :ref:`minimal_cost_complexity_pruning` for details.
1311
+
1312
+ .. versionadded:: 0.22
1313
+
1314
+ Attributes
1315
+ ----------
1316
+ n_estimators_ : int
1317
+ The number of estimators as selected by early stopping (if
1318
+ ``n_iter_no_change`` is specified). Otherwise it is set to
1319
+ ``n_estimators``.
1320
+
1321
+ .. versionadded:: 0.20
1322
+
1323
+ n_trees_per_iteration_ : int
1324
+ The number of trees that are built at each iteration. For binary classifiers,
1325
+ this is always 1.
1326
+
1327
+ .. versionadded:: 1.4.0
1328
+
1329
+ feature_importances_ : ndarray of shape (n_features,)
1330
+ The impurity-based feature importances.
1331
+ The higher, the more important the feature.
1332
+ The importance of a feature is computed as the (normalized)
1333
+ total reduction of the criterion brought by that feature. It is also
1334
+ known as the Gini importance.
1335
+
1336
+ Warning: impurity-based feature importances can be misleading for
1337
+ high cardinality features (many unique values). See
1338
+ :func:`sklearn.inspection.permutation_importance` as an alternative.
1339
+
1340
+ oob_improvement_ : ndarray of shape (n_estimators,)
1341
+ The improvement in loss on the out-of-bag samples
1342
+ relative to the previous iteration.
1343
+ ``oob_improvement_[0]`` is the improvement in
1344
+ loss of the first stage over the ``init`` estimator.
1345
+ Only available if ``subsample < 1.0``.
1346
+
1347
+ oob_scores_ : ndarray of shape (n_estimators,)
1348
+ The full history of the loss values on the out-of-bag
1349
+ samples. Only available if `subsample < 1.0`.
1350
+
1351
+ .. versionadded:: 1.3
1352
+
1353
+ oob_score_ : float
1354
+ The last value of the loss on the out-of-bag samples. It is
1355
+ the same as `oob_scores_[-1]`. Only available if `subsample < 1.0`.
1356
+
1357
+ .. versionadded:: 1.3
1358
+
1359
+ train_score_ : ndarray of shape (n_estimators,)
1360
+ The i-th score ``train_score_[i]`` is the loss of the
1361
+ model at iteration ``i`` on the in-bag sample.
1362
+ If ``subsample == 1`` this is the loss on the training data.
1363
+
1364
+ init_ : estimator
1365
+ The estimator that provides the initial predictions. Set via the ``init``
1366
+ argument.
1367
+
1368
+ estimators_ : ndarray of DecisionTreeRegressor of \
1369
+ shape (n_estimators, ``n_trees_per_iteration_``)
1370
+ The collection of fitted sub-estimators. ``n_trees_per_iteration_`` is 1 for
1371
+ binary classification, otherwise ``n_classes``.
1372
+
1373
+ classes_ : ndarray of shape (n_classes,)
1374
+ The classes labels.
1375
+
1376
+ n_features_in_ : int
1377
+ Number of features seen during :term:`fit`.
1378
+
1379
+ .. versionadded:: 0.24
1380
+
1381
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
1382
+ Names of features seen during :term:`fit`. Defined only when `X`
1383
+ has feature names that are all strings.
1384
+
1385
+ .. versionadded:: 1.0
1386
+
1387
+ n_classes_ : int
1388
+ The number of classes.
1389
+
1390
+ max_features_ : int
1391
+ The inferred value of max_features.
1392
+
1393
+ See Also
1394
+ --------
1395
+ HistGradientBoostingClassifier : Histogram-based Gradient Boosting
1396
+ Classification Tree.
1397
+ sklearn.tree.DecisionTreeClassifier : A decision tree classifier.
1398
+ RandomForestClassifier : A meta-estimator that fits a number of decision
1399
+ tree classifiers on various sub-samples of the dataset and uses
1400
+ averaging to improve the predictive accuracy and control over-fitting.
1401
+ AdaBoostClassifier : A meta-estimator that begins by fitting a classifier
1402
+ on the original dataset and then fits additional copies of the
1403
+ classifier on the same dataset where the weights of incorrectly
1404
+ classified instances are adjusted such that subsequent classifiers
1405
+ focus more on difficult cases.
1406
+
1407
+ Notes
1408
+ -----
1409
+ The features are always randomly permuted at each split. Therefore,
1410
+ the best found split may vary, even with the same training data and
1411
+ ``max_features=n_features``, if the improvement of the criterion is
1412
+ identical for several splits enumerated during the search of the best
1413
+ split. To obtain a deterministic behaviour during fitting,
1414
+ ``random_state`` has to be fixed.
1415
+
1416
+ References
1417
+ ----------
1418
+ J. Friedman, Greedy Function Approximation: A Gradient Boosting
1419
+ Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
1420
+
1421
+ J. Friedman, Stochastic Gradient Boosting, 1999
1422
+
1423
+ T. Hastie, R. Tibshirani and J. Friedman.
1424
+ Elements of Statistical Learning Ed. 2, Springer, 2009.
1425
+
1426
+ Examples
1427
+ --------
1428
+ The following example shows how to fit a gradient boosting classifier with
1429
+ 100 decision stumps as weak learners.
1430
+
1431
+ >>> from sklearn.datasets import make_hastie_10_2
1432
+ >>> from sklearn.ensemble import GradientBoostingClassifier
1433
+
1434
+ >>> X, y = make_hastie_10_2(random_state=0)
1435
+ >>> X_train, X_test = X[:2000], X[2000:]
1436
+ >>> y_train, y_test = y[:2000], y[2000:]
1437
+
1438
+ >>> clf = GradientBoostingClassifier(n_estimators=100, learning_rate=1.0,
1439
+ ... max_depth=1, random_state=0).fit(X_train, y_train)
1440
+ >>> clf.score(X_test, y_test)
1441
+ 0.913...
1442
+ """
1443
+
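As a hedged illustration of the early-stopping parameters documented above (``n_iter_no_change``, ``validation_fraction``, ``tol``) -- an editorial sketch, not part of the upstream source -- enabling early stopping caps the number of fitted stages so that ``n_estimators_ <= n_estimators``:

from sklearn.datasets import make_hastie_10_2
from sklearn.ensemble import GradientBoostingClassifier

X, y = make_hastie_10_2(random_state=0)
# Stop adding trees once the validation score has not improved by at least
# `tol` for 5 consecutive iterations; 10% of the training data is held out.
clf_es = GradientBoostingClassifier(
    n_estimators=500, n_iter_no_change=5, validation_fraction=0.1,
    tol=1e-4, random_state=0,
).fit(X[:2000], y[:2000])
assert clf_es.n_estimators_ <= 500  # usually far fewer stages are fitted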
1444
+ _parameter_constraints: dict = {
1445
+ **BaseGradientBoosting._parameter_constraints,
1446
+ "loss": [StrOptions({"log_loss", "exponential"})],
1447
+ "init": [StrOptions({"zero"}), None, HasMethods(["fit", "predict_proba"])],
1448
+ }
1449
+
1450
+ def __init__(
1451
+ self,
1452
+ *,
1453
+ loss="log_loss",
1454
+ learning_rate=0.1,
1455
+ n_estimators=100,
1456
+ subsample=1.0,
1457
+ criterion="friedman_mse",
1458
+ min_samples_split=2,
1459
+ min_samples_leaf=1,
1460
+ min_weight_fraction_leaf=0.0,
1461
+ max_depth=3,
1462
+ min_impurity_decrease=0.0,
1463
+ init=None,
1464
+ random_state=None,
1465
+ max_features=None,
1466
+ verbose=0,
1467
+ max_leaf_nodes=None,
1468
+ warm_start=False,
1469
+ validation_fraction=0.1,
1470
+ n_iter_no_change=None,
1471
+ tol=1e-4,
1472
+ ccp_alpha=0.0,
1473
+ ):
1474
+ super().__init__(
1475
+ loss=loss,
1476
+ learning_rate=learning_rate,
1477
+ n_estimators=n_estimators,
1478
+ criterion=criterion,
1479
+ min_samples_split=min_samples_split,
1480
+ min_samples_leaf=min_samples_leaf,
1481
+ min_weight_fraction_leaf=min_weight_fraction_leaf,
1482
+ max_depth=max_depth,
1483
+ init=init,
1484
+ subsample=subsample,
1485
+ max_features=max_features,
1486
+ random_state=random_state,
1487
+ verbose=verbose,
1488
+ max_leaf_nodes=max_leaf_nodes,
1489
+ min_impurity_decrease=min_impurity_decrease,
1490
+ warm_start=warm_start,
1491
+ validation_fraction=validation_fraction,
1492
+ n_iter_no_change=n_iter_no_change,
1493
+ tol=tol,
1494
+ ccp_alpha=ccp_alpha,
1495
+ )
1496
+
1497
+ def _encode_y(self, y, sample_weight):
1498
+ # encode classes into 0 ... n_classes - 1 and sets attributes classes_
1499
+ # and n_trees_per_iteration_
1500
+ check_classification_targets(y)
1501
+
1502
+ label_encoder = LabelEncoder()
1503
+ encoded_y_int = label_encoder.fit_transform(y)
1504
+ self.classes_ = label_encoder.classes_
1505
+ n_classes = self.classes_.shape[0]
1506
+ # only 1 tree for binary classification. For multiclass classification,
1507
+ # we build 1 tree per class.
1508
+ self.n_trees_per_iteration_ = 1 if n_classes <= 2 else n_classes
1509
+ encoded_y = encoded_y_int.astype(float, copy=False)
1510
+
1511
+ # From here on, it is additional to the HGBT case.
1512
+ # expose n_classes_ attribute
1513
+ self.n_classes_ = n_classes
1514
+ if sample_weight is None:
1515
+ n_trim_classes = n_classes
1516
+ else:
1517
+ n_trim_classes = np.count_nonzero(np.bincount(encoded_y_int, sample_weight))
1518
+
1519
+ if n_trim_classes < 2:
1520
+ raise ValueError(
1521
+ "y contains %d class after sample_weight "
1522
+ "trimmed classes with zero weights, while a "
1523
+ "minimum of 2 classes are required." % n_trim_classes
1524
+ )
1525
+ return encoded_y
1526
+
1527
+ def _get_loss(self, sample_weight):
1528
+ if self.loss == "log_loss":
1529
+ if self.n_classes_ == 2:
1530
+ return HalfBinomialLoss(sample_weight=sample_weight)
1531
+ else:
1532
+ return HalfMultinomialLoss(
1533
+ sample_weight=sample_weight, n_classes=self.n_classes_
1534
+ )
1535
+ elif self.loss == "exponential":
1536
+ if self.n_classes_ > 2:
1537
+ raise ValueError(
1538
+ f"loss='{self.loss}' is only suitable for a binary classification "
1539
+ f"problem, you have n_classes={self.n_classes_}. "
1540
+ "Please use loss='log_loss' instead."
1541
+ )
1542
+ else:
1543
+ return ExponentialLoss(sample_weight=sample_weight)
1544
+
1545
+ def decision_function(self, X):
1546
+ """Compute the decision function of ``X``.
1547
+
1548
+ Parameters
1549
+ ----------
1550
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
1551
+ The input samples. Internally, it will be converted to
1552
+ ``dtype=np.float32`` and if a sparse matrix is provided
1553
+ to a sparse ``csr_matrix``.
1554
+
1555
+ Returns
1556
+ -------
1557
+ score : ndarray of shape (n_samples, n_classes) or (n_samples,)
1558
+ The decision function of the input samples, which corresponds to
1559
+ the raw values predicted from the trees of the ensemble. The
1560
+ order of the classes corresponds to that in the attribute
1561
+ :term:`classes_`. Regression and binary classification produce an
1562
+ array of shape (n_samples,).
1563
+ """
1564
+ X = self._validate_data(
1565
+ X, dtype=DTYPE, order="C", accept_sparse="csr", reset=False
1566
+ )
1567
+ raw_predictions = self._raw_predict(X)
1568
+ if raw_predictions.shape[1] == 1:
1569
+ return raw_predictions.ravel()
1570
+ return raw_predictions
1571
+
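A small illustrative sketch (editorial addition; ``clf`` and ``X_test`` are assumed to come from the fitted classifier in the class-level example above): for binary problems the decision function is 1-D, and ``predict`` is simply its sign mapped through ``classes_``.

import numpy as np

scores = clf.decision_function(X_test)          # shape (n_samples,) for 2 classes
pred = clf.classes_[(scores >= 0).astype(int)]  # threshold the raw values at 0
assert np.array_equal(pred, clf.predict(X_test))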
1572
+ def staged_decision_function(self, X):
1573
+ """Compute decision function of ``X`` for each iteration.
1574
+
1575
+ This method allows monitoring (i.e. determining the error on a test set)
1576
+ after each stage.
1577
+
1578
+ Parameters
1579
+ ----------
1580
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
1581
+ The input samples. Internally, it will be converted to
1582
+ ``dtype=np.float32`` and if a sparse matrix is provided
1583
+ to a sparse ``csr_matrix``.
1584
+
1585
+ Yields
1586
+ ------
1587
+ score : generator of ndarray of shape (n_samples, k)
1588
+ The decision function of the input samples, which corresponds to
1589
+ the raw values predicted from the trees of the ensemble. The
1590
+ order of the classes corresponds to that in the attribute :term:`classes_`.
1591
+ Regression and binary classification are special cases with
1592
+ ``k == 1``, otherwise ``k==n_classes``.
1593
+ """
1594
+ yield from self._staged_raw_predict(X)
1595
+
1596
+ def predict(self, X):
1597
+ """Predict class for X.
1598
+
1599
+ Parameters
1600
+ ----------
1601
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
1602
+ The input samples. Internally, it will be converted to
1603
+ ``dtype=np.float32`` and if a sparse matrix is provided
1604
+ to a sparse ``csr_matrix``.
1605
+
1606
+ Returns
1607
+ -------
1608
+ y : ndarray of shape (n_samples,)
1609
+ The predicted values.
1610
+ """
1611
+ raw_predictions = self.decision_function(X)
1612
+ if raw_predictions.ndim == 1: # decision_function already squeezed it
1613
+ encoded_classes = (raw_predictions >= 0).astype(int)
1614
+ else:
1615
+ encoded_classes = np.argmax(raw_predictions, axis=1)
1616
+ return self.classes_[encoded_classes]
1617
+
1618
+ def staged_predict(self, X):
1619
+ """Predict class at each stage for X.
1620
+
1621
+ This method allows monitoring (i.e. determining the error on a test set)
1622
+ after each stage.
1623
+
1624
+ Parameters
1625
+ ----------
1626
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
1627
+ The input samples. Internally, it will be converted to
1628
+ ``dtype=np.float32`` and if a sparse matrix is provided
1629
+ to a sparse ``csr_matrix``.
1630
+
1631
+ Yields
1632
+ ------
1633
+ y : generator of ndarray of shape (n_samples,)
1634
+ The predicted value of the input samples.
1635
+ """
1636
+ if self.n_classes_ == 2: # n_trees_per_iteration_ = 1
1637
+ for raw_predictions in self._staged_raw_predict(X):
1638
+ encoded_classes = (raw_predictions.squeeze() >= 0).astype(int)
1639
+ yield self.classes_.take(encoded_classes, axis=0)
1640
+ else:
1641
+ for raw_predictions in self._staged_raw_predict(X):
1642
+ encoded_classes = np.argmax(raw_predictions, axis=1)
1643
+ yield self.classes_.take(encoded_classes, axis=0)
1644
+
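The staged methods above exist for monitoring. A minimal sketch (editorial addition; ``clf``, ``X_test`` and ``y_test`` are assumed from the class-level example) tracks test accuracy after every boosting stage and picks the best one:

import numpy as np

test_accuracy = np.array([
    np.mean(y_stage == y_test) for y_stage in clf.staged_predict(X_test)
])
best_n_trees = int(np.argmax(test_accuracy)) + 1  # 1-based number of stages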
1645
+ def predict_proba(self, X):
1646
+ """Predict class probabilities for X.
1647
+
1648
+ Parameters
1649
+ ----------
1650
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
1651
+ The input samples. Internally, it will be converted to
1652
+ ``dtype=np.float32`` and if a sparse matrix is provided
1653
+ to a sparse ``csr_matrix``.
1654
+
1655
+ Returns
1656
+ -------
1657
+ p : ndarray of shape (n_samples, n_classes)
1658
+ The class probabilities of the input samples. The order of the
1659
+ classes corresponds to that in the attribute :term:`classes_`.
1660
+
1661
+ Raises
1662
+ ------
1663
+ AttributeError
1664
+ If the ``loss`` does not support probabilities.
1665
+ """
1666
+ raw_predictions = self.decision_function(X)
1667
+ return self._loss.predict_proba(raw_predictions)
1668
+
1669
+ def predict_log_proba(self, X):
1670
+ """Predict class log-probabilities for X.
1671
+
1672
+ Parameters
1673
+ ----------
1674
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
1675
+ The input samples. Internally, it will be converted to
1676
+ ``dtype=np.float32`` and if a sparse matrix is provided
1677
+ to a sparse ``csr_matrix``.
1678
+
1679
+ Returns
1680
+ -------
1681
+ p : ndarray of shape (n_samples, n_classes)
1682
+ The class log-probabilities of the input samples. The order of the
1683
+ classes corresponds to that in the attribute :term:`classes_`.
1684
+
1685
+ Raises
1686
+ ------
1687
+ AttributeError
1688
+ If the ``loss`` does not support probabilities.
1689
+ """
1690
+ proba = self.predict_proba(X)
1691
+ return np.log(proba)
1692
+
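A quick sanity-check sketch (editorial addition, same assumed ``clf`` and ``X_test`` as above): the probabilities are obtained from the raw decision values through the loss's inverse link, sum to one per sample, and ``predict_log_proba`` is their elementwise log.

import numpy as np

proba = clf.predict_proba(X_test)               # shape (n_samples, n_classes)
assert np.allclose(proba.sum(axis=1), 1.0)
assert np.allclose(np.log(proba), clf.predict_log_proba(X_test))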
1693
+ def staged_predict_proba(self, X):
1694
+ """Predict class probabilities at each stage for X.
1695
+
1696
+ This method allows monitoring (i.e. determining the error on a test set)
1697
+ after each stage.
1698
+
1699
+ Parameters
1700
+ ----------
1701
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
1702
+ The input samples. Internally, it will be converted to
1703
+ ``dtype=np.float32`` and if a sparse matrix is provided
1704
+ to a sparse ``csr_matrix``.
1705
+
1706
+ Yields
1707
+ ------
1708
+ y : generator of ndarray of shape (n_samples,)
1709
+ The predicted value of the input samples.
1710
+ """
1711
+ try:
1712
+ for raw_predictions in self._staged_raw_predict(X):
1713
+ yield self._loss.predict_proba(raw_predictions)
1714
+ except NotFittedError:
1715
+ raise
1716
+ except AttributeError as e:
1717
+ raise AttributeError(
1718
+ "loss=%r does not support predict_proba" % self.loss
1719
+ ) from e
1720
+
1721
+
1722
+ class GradientBoostingRegressor(RegressorMixin, BaseGradientBoosting):
1723
+ """Gradient Boosting for regression.
1724
+
1725
+ This estimator builds an additive model in a forward stage-wise fashion; it
1726
+ allows for the optimization of arbitrary differentiable loss functions. In
1727
+ each stage a regression tree is fit on the negative gradient of the given
1728
+ loss function.
1729
+
1730
+ :class:`sklearn.ensemble.HistGradientBoostingRegressor` is a much faster
1731
+ variant of this algorithm for intermediate datasets (`n_samples >= 10_000`).
1732
+
1733
+ Read more in the :ref:`User Guide <gradient_boosting>`.
1734
+
1735
+ Parameters
1736
+ ----------
1737
+ loss : {'squared_error', 'absolute_error', 'huber', 'quantile'}, \
1738
+ default='squared_error'
1739
+ Loss function to be optimized. 'squared_error' refers to the squared
1740
+ error for regression. 'absolute_error' refers to the absolute error of
1741
+ regression and is a robust loss function. 'huber' is a
1742
+ combination of the two. 'quantile' allows quantile regression (use
1743
+ `alpha` to specify the quantile).
1744
+
1745
+ learning_rate : float, default=0.1
1746
+ Learning rate shrinks the contribution of each tree by `learning_rate`.
1747
+ There is a trade-off between learning_rate and n_estimators.
1748
+ Values must be in the range `[0.0, inf)`.
1749
+
1750
+ n_estimators : int, default=100
1751
+ The number of boosting stages to perform. Gradient boosting
1752
+ is fairly robust to over-fitting so a large number usually
1753
+ results in better performance.
1754
+ Values must be in the range `[1, inf)`.
1755
+
1756
+ subsample : float, default=1.0
1757
+ The fraction of samples to be used for fitting the individual base
1758
+ learners. If smaller than 1.0 this results in Stochastic Gradient
1759
+ Boosting. `subsample` interacts with the parameter `n_estimators`.
1760
+ Choosing `subsample < 1.0` leads to a reduction of variance
1761
+ and an increase in bias.
1762
+ Values must be in the range `(0.0, 1.0]`.
1763
+
1764
+ criterion : {'friedman_mse', 'squared_error'}, default='friedman_mse'
1765
+ The function to measure the quality of a split. Supported criteria are
1766
+ "friedman_mse" for the mean squared error with improvement score by
1767
+ Friedman, "squared_error" for mean squared error. The default value of
1768
+ "friedman_mse" is generally the best as it can provide a better
1769
+ approximation in some cases.
1770
+
1771
+ .. versionadded:: 0.18
1772
+
1773
+ min_samples_split : int or float, default=2
1774
+ The minimum number of samples required to split an internal node:
1775
+
1776
+ - If int, values must be in the range `[2, inf)`.
1777
+ - If float, values must be in the range `(0.0, 1.0]` and `min_samples_split`
1778
+ will be `ceil(min_samples_split * n_samples)`.
1779
+
1780
+ .. versionchanged:: 0.18
1781
+ Added float values for fractions.
1782
+
1783
+ min_samples_leaf : int or float, default=1
1784
+ The minimum number of samples required to be at a leaf node.
1785
+ A split point at any depth will only be considered if it leaves at
1786
+ least ``min_samples_leaf`` training samples in each of the left and
1787
+ right branches. This may have the effect of smoothing the model,
1788
+ especially in regression.
1789
+
1790
+ - If int, values must be in the range `[1, inf)`.
1791
+ - If float, values must be in the range `(0.0, 1.0)` and `min_samples_leaf`
1792
+ will be `ceil(min_samples_leaf * n_samples)`.
1793
+
1794
+ .. versionchanged:: 0.18
1795
+ Added float values for fractions.
1796
+
1797
+ min_weight_fraction_leaf : float, default=0.0
1798
+ The minimum weighted fraction of the sum total of weights (of all
1799
+ the input samples) required to be at a leaf node. Samples have
1800
+ equal weight when sample_weight is not provided.
1801
+ Values must be in the range `[0.0, 0.5]`.
1802
+
1803
+ max_depth : int or None, default=3
1804
+ Maximum depth of the individual regression estimators. The maximum
1805
+ depth limits the number of nodes in the tree. Tune this parameter
1806
+ for best performance; the best value depends on the interaction
1807
+ of the input variables. If None, then nodes are expanded until
1808
+ all leaves are pure or until all leaves contain less than
1809
+ min_samples_split samples.
1810
+ If int, values must be in the range `[1, inf)`.
1811
+
1812
+ min_impurity_decrease : float, default=0.0
1813
+ A node will be split if this split induces a decrease of the impurity
1814
+ greater than or equal to this value.
1815
+ Values must be in the range `[0.0, inf)`.
1816
+
1817
+ The weighted impurity decrease equation is the following::
1818
+
1819
+ N_t / N * (impurity - N_t_R / N_t * right_impurity
1820
+ - N_t_L / N_t * left_impurity)
1821
+
1822
+ where ``N`` is the total number of samples, ``N_t`` is the number of
1823
+ samples at the current node, ``N_t_L`` is the number of samples in the
1824
+ left child, and ``N_t_R`` is the number of samples in the right child.
1825
+
1826
+ ``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,
1827
+ if ``sample_weight`` is passed.
1828
+
1829
+ .. versionadded:: 0.19
1830
+
1831
+ init : estimator or 'zero', default=None
1832
+ An estimator object that is used to compute the initial predictions.
1833
+ ``init`` has to provide :term:`fit` and :term:`predict`. If 'zero', the
1834
+ initial raw predictions are set to zero. By default a
1835
+ ``DummyEstimator`` is used, predicting either the average target value
1836
+ (for loss='squared_error'), or a quantile for the other losses.
1837
+
1838
+ random_state : int, RandomState instance or None, default=None
1839
+ Controls the random seed given to each Tree estimator at each
1840
+ boosting iteration.
1841
+ In addition, it controls the random permutation of the features at
1842
+ each split (see Notes for more details).
1843
+ It also controls the random splitting of the training data to obtain a
1844
+ validation set if `n_iter_no_change` is not None.
1845
+ Pass an int for reproducible output across multiple function calls.
1846
+ See :term:`Glossary <random_state>`.
1847
+
1848
+ max_features : {'sqrt', 'log2'}, int or float, default=None
1849
+ The number of features to consider when looking for the best split:
1850
+
1851
+ - If int, values must be in the range `[1, inf)`.
1852
+ - If float, values must be in the range `(0.0, 1.0]` and the features
1853
+ considered at each split will be `max(1, int(max_features * n_features_in_))`.
1854
+ - If "sqrt", then `max_features=sqrt(n_features)`.
1855
+ - If "log2", then `max_features=log2(n_features)`.
1856
+ - If None, then `max_features=n_features`.
1857
+
1858
+ Choosing `max_features < n_features` leads to a reduction of variance
1859
+ and an increase in bias.
1860
+
1861
+ Note: the search for a split does not stop until at least one
1862
+ valid partition of the node samples is found, even if it requires to
1863
+ effectively inspect more than ``max_features`` features.
1864
+
1865
+ alpha : float, default=0.9
1866
+ The alpha-quantile of the huber loss function and the quantile
1867
+ loss function. Only if ``loss='huber'`` or ``loss='quantile'``.
1868
+ Values must be in the range `(0.0, 1.0)`.
1869
+
1870
+ verbose : int, default=0
1871
+ Enable verbose output. If 1 then it prints progress and performance
1872
+ once in a while (the more trees the lower the frequency). If greater
1873
+ than 1 then it prints progress and performance for every tree.
1874
+ Values must be in the range `[0, inf)`.
1875
+
1876
+ max_leaf_nodes : int, default=None
1877
+ Grow trees with ``max_leaf_nodes`` in best-first fashion.
1878
+ Best nodes are defined as relative reduction in impurity.
1879
+ Values must be in the range `[2, inf)`.
1880
+ If None, then an unlimited number of leaf nodes.
1881
+
1882
+ warm_start : bool, default=False
1883
+ When set to ``True``, reuse the solution of the previous call to fit
1884
+ and add more estimators to the ensemble, otherwise, just erase the
1885
+ previous solution. See :term:`the Glossary <warm_start>`.
1886
+
1887
+ validation_fraction : float, default=0.1
1888
+ The proportion of training data to set aside as validation set for
1889
+ early stopping. Values must be in the range `(0.0, 1.0)`.
1890
+ Only used if ``n_iter_no_change`` is set to an integer.
1891
+
1892
+ .. versionadded:: 0.20
1893
+
1894
+ n_iter_no_change : int, default=None
1895
+ ``n_iter_no_change`` is used to decide if early stopping will be used
1896
+ to terminate training when validation score is not improving. By
1897
+ default it is set to None to disable early stopping. If set to a
1898
+ number, it will set aside ``validation_fraction`` size of the training
1899
+ data as validation and terminate training when validation score is not
1900
+ improving in all of the previous ``n_iter_no_change`` numbers of
1901
+ iterations.
1902
+ Values must be in the range `[1, inf)`.
1903
+ See
1904
+ :ref:`sphx_glr_auto_examples_ensemble_plot_gradient_boosting_early_stopping.py`.
1905
+
1906
+ .. versionadded:: 0.20
1907
+
1908
+ tol : float, default=1e-4
1909
+ Tolerance for the early stopping. When the loss is not improving
1910
+ by at least tol for ``n_iter_no_change`` iterations (if set to a
1911
+ number), the training stops.
1912
+ Values must be in the range `[0.0, inf)`.
1913
+
1914
+ .. versionadded:: 0.20
1915
+
1916
+ ccp_alpha : non-negative float, default=0.0
1917
+ Complexity parameter used for Minimal Cost-Complexity Pruning. The
1918
+ subtree with the largest cost complexity that is smaller than
1919
+ ``ccp_alpha`` will be chosen. By default, no pruning is performed.
1920
+ Values must be in the range `[0.0, inf)`.
1921
+ See :ref:`minimal_cost_complexity_pruning` for details.
1922
+
1923
+ .. versionadded:: 0.22
1924
+
1925
+ Attributes
1926
+ ----------
1927
+ n_estimators_ : int
1928
+ The number of estimators as selected by early stopping (if
1929
+ ``n_iter_no_change`` is specified). Otherwise it is set to
1930
+ ``n_estimators``.
1931
+
1932
+ n_trees_per_iteration_ : int
1933
+ The number of trees that are built at each iteration. For regressors, this is
1934
+ always 1.
1935
+
1936
+ .. versionadded:: 1.4.0
1937
+
1938
+ feature_importances_ : ndarray of shape (n_features,)
1939
+ The impurity-based feature importances.
1940
+ The higher, the more important the feature.
1941
+ The importance of a feature is computed as the (normalized)
1942
+ total reduction of the criterion brought by that feature. It is also
1943
+ known as the Gini importance.
1944
+
1945
+ Warning: impurity-based feature importances can be misleading for
1946
+ high cardinality features (many unique values). See
1947
+ :func:`sklearn.inspection.permutation_importance` as an alternative.
1948
+
1949
+ oob_improvement_ : ndarray of shape (n_estimators,)
1950
+ The improvement in loss on the out-of-bag samples
1951
+ relative to the previous iteration.
1952
+ ``oob_improvement_[0]`` is the improvement in
1953
+ loss of the first stage over the ``init`` estimator.
1954
+ Only available if ``subsample < 1.0``.
1955
+
1956
+ oob_scores_ : ndarray of shape (n_estimators,)
1957
+ The full history of the loss values on the out-of-bag
1958
+ samples. Only available if `subsample < 1.0`.
1959
+
1960
+ .. versionadded:: 1.3
1961
+
1962
+ oob_score_ : float
1963
+ The last value of the loss on the out-of-bag samples. It is
1964
+ the same as `oob_scores_[-1]`. Only available if `subsample < 1.0`.
1965
+
1966
+ .. versionadded:: 1.3
1967
+
1968
+ train_score_ : ndarray of shape (n_estimators,)
1969
+ The i-th score ``train_score_[i]`` is the loss of the
1970
+ model at iteration ``i`` on the in-bag sample.
1971
+ If ``subsample == 1`` this is the loss on the training data.
1972
+
1973
+ init_ : estimator
1974
+ The estimator that provides the initial predictions. Set via the ``init``
1975
+ argument.
1976
+
1977
+ estimators_ : ndarray of DecisionTreeRegressor of shape (n_estimators, 1)
1978
+ The collection of fitted sub-estimators.
1979
+
1980
+ n_features_in_ : int
1981
+ Number of features seen during :term:`fit`.
1982
+
1983
+ .. versionadded:: 0.24
1984
+
1985
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
1986
+ Names of features seen during :term:`fit`. Defined only when `X`
1987
+ has feature names that are all strings.
1988
+
1989
+ .. versionadded:: 1.0
1990
+
1991
+ max_features_ : int
1992
+ The inferred value of max_features.
1993
+
1994
+ See Also
1995
+ --------
1996
+ HistGradientBoostingRegressor : Histogram-based Gradient Boosting
1997
+ Regression Tree.
1998
+ sklearn.tree.DecisionTreeRegressor : A decision tree regressor.
1999
+ sklearn.ensemble.RandomForestRegressor : A random forest regressor.
2000
+
2001
+ Notes
2002
+ -----
2003
+ The features are always randomly permuted at each split. Therefore,
2004
+ the best found split may vary, even with the same training data and
2005
+ ``max_features=n_features``, if the improvement of the criterion is
2006
+ identical for several splits enumerated during the search of the best
2007
+ split. To obtain a deterministic behaviour during fitting,
2008
+ ``random_state`` has to be fixed.
2009
+
2010
+ References
2011
+ ----------
2012
+ J. Friedman, Greedy Function Approximation: A Gradient Boosting
2013
+ Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
2014
+
2015
+ J. Friedman, Stochastic Gradient Boosting, 1999
2016
+
2017
+ T. Hastie, R. Tibshirani and J. Friedman.
2018
+ Elements of Statistical Learning Ed. 2, Springer, 2009.
2019
+
2020
+ Examples
2021
+ --------
2022
+ >>> from sklearn.datasets import make_regression
2023
+ >>> from sklearn.ensemble import GradientBoostingRegressor
2024
+ >>> from sklearn.model_selection import train_test_split
2025
+ >>> X, y = make_regression(random_state=0)
2026
+ >>> X_train, X_test, y_train, y_test = train_test_split(
2027
+ ... X, y, random_state=0)
2028
+ >>> reg = GradientBoostingRegressor(random_state=0)
2029
+ >>> reg.fit(X_train, y_train)
2030
+ GradientBoostingRegressor(random_state=0)
2031
+ >>> reg.predict(X_test[1:2])
2032
+ array([-61...])
2033
+ >>> reg.score(X_test, y_test)
2034
+ 0.4...
2035
+ """
2036
+
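As a hedged illustration of the ``loss='quantile'`` and ``alpha`` parameters documented above (editorial addition, not part of the upstream docstring), fitting two models at different quantiles yields an approximate prediction interval:

from sklearn.datasets import make_regression
from sklearn.ensemble import GradientBoostingRegressor

X, y = make_regression(n_samples=500, noise=10.0, random_state=0)
lower = GradientBoostingRegressor(loss="quantile", alpha=0.05, random_state=0).fit(X, y)
upper = GradientBoostingRegressor(loss="quantile", alpha=0.95, random_state=0).fit(X, y)
# Roughly 90% of the targets should fall between the two predicted quantiles.
interval = (lower.predict(X), upper.predict(X))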
2037
+ _parameter_constraints: dict = {
2038
+ **BaseGradientBoosting._parameter_constraints,
2039
+ "loss": [StrOptions({"squared_error", "absolute_error", "huber", "quantile"})],
2040
+ "init": [StrOptions({"zero"}), None, HasMethods(["fit", "predict"])],
2041
+ "alpha": [Interval(Real, 0.0, 1.0, closed="neither")],
2042
+ }
2043
+
2044
+ def __init__(
2045
+ self,
2046
+ *,
2047
+ loss="squared_error",
2048
+ learning_rate=0.1,
2049
+ n_estimators=100,
2050
+ subsample=1.0,
2051
+ criterion="friedman_mse",
2052
+ min_samples_split=2,
2053
+ min_samples_leaf=1,
2054
+ min_weight_fraction_leaf=0.0,
2055
+ max_depth=3,
2056
+ min_impurity_decrease=0.0,
2057
+ init=None,
2058
+ random_state=None,
2059
+ max_features=None,
2060
+ alpha=0.9,
2061
+ verbose=0,
2062
+ max_leaf_nodes=None,
2063
+ warm_start=False,
2064
+ validation_fraction=0.1,
2065
+ n_iter_no_change=None,
2066
+ tol=1e-4,
2067
+ ccp_alpha=0.0,
2068
+ ):
2069
+ super().__init__(
2070
+ loss=loss,
2071
+ learning_rate=learning_rate,
2072
+ n_estimators=n_estimators,
2073
+ criterion=criterion,
2074
+ min_samples_split=min_samples_split,
2075
+ min_samples_leaf=min_samples_leaf,
2076
+ min_weight_fraction_leaf=min_weight_fraction_leaf,
2077
+ max_depth=max_depth,
2078
+ init=init,
2079
+ subsample=subsample,
2080
+ max_features=max_features,
2081
+ min_impurity_decrease=min_impurity_decrease,
2082
+ random_state=random_state,
2083
+ alpha=alpha,
2084
+ verbose=verbose,
2085
+ max_leaf_nodes=max_leaf_nodes,
2086
+ warm_start=warm_start,
2087
+ validation_fraction=validation_fraction,
2088
+ n_iter_no_change=n_iter_no_change,
2089
+ tol=tol,
2090
+ ccp_alpha=ccp_alpha,
2091
+ )
2092
+
2093
+ def _encode_y(self, y=None, sample_weight=None):
2094
+ # Just convert y to the expected dtype
2095
+ self.n_trees_per_iteration_ = 1
2096
+ y = y.astype(DOUBLE, copy=False)
2097
+ return y
2098
+
2099
+ def _get_loss(self, sample_weight):
2100
+ if self.loss in ("quantile", "huber"):
2101
+ return _LOSSES[self.loss](sample_weight=sample_weight, quantile=self.alpha)
2102
+ else:
2103
+ return _LOSSES[self.loss](sample_weight=sample_weight)
2104
+
2105
+ def predict(self, X):
2106
+ """Predict regression target for X.
2107
+
2108
+ Parameters
2109
+ ----------
2110
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
2111
+ The input samples. Internally, it will be converted to
2112
+ ``dtype=np.float32`` and if a sparse matrix is provided
2113
+ to a sparse ``csr_matrix``.
2114
+
2115
+ Returns
2116
+ -------
2117
+ y : ndarray of shape (n_samples,)
2118
+ The predicted values.
2119
+ """
2120
+ X = self._validate_data(
2121
+ X, dtype=DTYPE, order="C", accept_sparse="csr", reset=False
2122
+ )
2123
+ # In regression we can directly return the raw value from the trees.
2124
+ return self._raw_predict(X).ravel()
2125
+
2126
+ def staged_predict(self, X):
2127
+ """Predict regression target at each stage for X.
2128
+
2129
+ This method allows monitoring (i.e. determining the error on a test set)
2130
+ after each stage.
2131
+
2132
+ Parameters
2133
+ ----------
2134
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
2135
+ The input samples. Internally, it will be converted to
2136
+ ``dtype=np.float32`` and if a sparse matrix is provided
2137
+ to a sparse ``csr_matrix``.
2138
+
2139
+ Yields
2140
+ ------
2141
+ y : generator of ndarray of shape (n_samples,)
2142
+ The predicted value of the input samples.
2143
+ """
2144
+ for raw_predictions in self._staged_raw_predict(X):
2145
+ yield raw_predictions.ravel()
2146
+
2147
+ def apply(self, X):
2148
+ """Apply trees in the ensemble to X, return leaf indices.
2149
+
2150
+ .. versionadded:: 0.17
2151
+
2152
+ Parameters
2153
+ ----------
2154
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
2155
+ The input samples. Internally, its dtype will be converted to
2156
+ ``dtype=np.float32``. If a sparse matrix is provided, it will
2157
+ be converted to a sparse ``csr_matrix``.
2158
+
2159
+ Returns
2160
+ -------
2161
+ X_leaves : array-like of shape (n_samples, n_estimators)
2162
+ For each datapoint x in X and for each tree in the ensemble,
2163
+ return the index of the leaf x ends up in each estimator.
2164
+ """
2165
+
2166
+ leaves = super().apply(X)
2167
+ leaves = leaves.reshape(X.shape[0], self.estimators_.shape[0])
2168
+ return leaves
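The leaf indices returned by ``apply`` are often one-hot encoded and used as features for a downstream linear model; a minimal sketch (editorial addition, using public scikit-learn APIs only):

from sklearn.datasets import make_regression
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.preprocessing import OneHotEncoder

X, y = make_regression(n_samples=200, random_state=0)
gbr = GradientBoostingRegressor(n_estimators=20, random_state=0).fit(X, y)
leaves = gbr.apply(X)                       # shape (n_samples, n_estimators)
assert leaves.shape == (X.shape[0], 20)
X_leaves = OneHotEncoder().fit_transform(leaves)  # sparse indicator features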
llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/_gradient_boosting.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (254 kB).
 
llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/__init__.py ADDED
@@ -0,0 +1,5 @@
1
+ """This module implements histogram-based gradient boosting estimators.
2
+
3
+ The implementation is a port from pygbm which is itself strongly inspired
4
+ from LightGBM.
5
+ """
llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/_binning.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (221 kB).
 
llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/_bitset.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (221 kB).
 
llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/_bitset.pxd ADDED
@@ -0,0 +1,18 @@
1
+ from .common cimport X_BINNED_DTYPE_C
2
+ from .common cimport BITSET_DTYPE_C
3
+ from .common cimport BITSET_INNER_DTYPE_C
4
+ from .common cimport X_DTYPE_C
5
+
6
+ cdef void init_bitset(BITSET_DTYPE_C bitset) noexcept nogil
7
+
8
+ cdef void set_bitset(BITSET_DTYPE_C bitset, X_BINNED_DTYPE_C val) noexcept nogil
9
+
10
+ cdef unsigned char in_bitset(BITSET_DTYPE_C bitset, X_BINNED_DTYPE_C val) noexcept nogil
11
+
12
+ cpdef unsigned char in_bitset_memoryview(const BITSET_INNER_DTYPE_C[:] bitset,
13
+ X_BINNED_DTYPE_C val) noexcept nogil
14
+
15
+ cdef unsigned char in_bitset_2d_memoryview(
16
+ const BITSET_INNER_DTYPE_C [:, :] bitset,
17
+ X_BINNED_DTYPE_C val,
18
+ unsigned int row) noexcept nogil
llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/_gradient_boosting.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (229 kB).
 
llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/_predictor.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (250 kB).
 
llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/binning.py ADDED
@@ -0,0 +1,321 @@
1
+ """
2
+ This module contains the BinMapper class.
3
+
4
+ BinMapper is used for mapping a real-valued dataset into integer-valued bins.
5
+ Bin thresholds are computed with the quantiles so that each bin contains
6
+ approximately the same number of samples.
7
+ """
8
+ # Author: Nicolas Hug
9
+
10
+ import numpy as np
11
+
12
+ from ...base import BaseEstimator, TransformerMixin
13
+ from ...utils import check_array, check_random_state
14
+ from ...utils._openmp_helpers import _openmp_effective_n_threads
15
+ from ...utils.fixes import percentile
16
+ from ...utils.validation import check_is_fitted
17
+ from ._binning import _map_to_bins
18
+ from ._bitset import set_bitset_memoryview
19
+ from .common import ALMOST_INF, X_BINNED_DTYPE, X_BITSET_INNER_DTYPE, X_DTYPE
20
+
21
+
22
+ def _find_binning_thresholds(col_data, max_bins):
23
+ """Extract quantiles from a continuous feature.
24
+
25
+ Missing values are ignored for finding the thresholds.
26
+
27
+ Parameters
28
+ ----------
29
+ col_data : array-like, shape (n_samples,)
30
+ The continuous feature to bin.
31
+ max_bins : int
32
+ The maximum number of bins to use for non-missing values. If for a
33
+ given feature the number of unique values is less than ``max_bins``,
34
+ then those unique values will be used to compute the bin thresholds,
35
+ instead of the quantiles.
36
+
37
+ Returns
38
+ -------
39
+ binning_thresholds : ndarray of shape (min(max_bins, n_unique_values) - 1,)
40
+ The increasing numeric values that can be used to separate the bins.
41
+ A given value x will be mapped into bin value i iff
42
+ binning_thresholds[i - 1] < x <= binning_thresholds[i]
43
+ """
44
+ # ignore missing values when computing bin thresholds
45
+ missing_mask = np.isnan(col_data)
46
+ if missing_mask.any():
47
+ col_data = col_data[~missing_mask]
48
+ col_data = np.ascontiguousarray(col_data, dtype=X_DTYPE)
49
+ distinct_values = np.unique(col_data)
50
+ if len(distinct_values) <= max_bins:
51
+ midpoints = distinct_values[:-1] + distinct_values[1:]
52
+ midpoints *= 0.5
53
+ else:
54
+ # We sort again the data in this case. We could compute
55
+ # approximate midpoint percentiles using the output of
56
+ # np.unique(col_data, return_counts) instead but this is more
57
+ # work and the performance benefit will be limited because we
58
+ # work on a fixed-size subsample of the full data.
59
+ percentiles = np.linspace(0, 100, num=max_bins + 1)
60
+ percentiles = percentiles[1:-1]
61
+ midpoints = percentile(col_data, percentiles, method="midpoint").astype(X_DTYPE)
62
+ assert midpoints.shape[0] == max_bins - 1
63
+
64
+ # We avoid having +inf thresholds: +inf thresholds are only allowed in
65
+ # a "split on nan" situation.
66
+ np.clip(midpoints, a_min=None, a_max=ALMOST_INF, out=midpoints)
67
+ return midpoints
68
+
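A hedged NumPy sketch (editorial addition, not the library's own code path) of what ``_find_binning_thresholds`` computes when a feature has fewer unique values than ``max_bins``: the thresholds are simply the midpoints between consecutive unique non-missing values.

import numpy as np

col = np.array([0.1, 0.1, 0.3, 0.3, 0.7, np.nan])
values = np.unique(col[~np.isnan(col)])       # missing values are ignored
midpoints = (values[:-1] + values[1:]) * 0.5  # -> array([0.2, 0.5])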
69
+
70
+ class _BinMapper(TransformerMixin, BaseEstimator):
71
+ """Transformer that maps a dataset into integer-valued bins.
72
+
73
+ For continuous features, the bins are created in a feature-wise fashion,
74
+ using quantiles so that each bin contains approximately the same number
75
+ of samples. For large datasets, quantiles are computed on a subset of the
76
+ data to speed up the binning, but the quantiles should remain stable.
77
+
78
+ For categorical features, the raw categorical values are expected to be
79
+ in [0, 254] (this is not validated here though) and each category
80
+ corresponds to a bin. All categorical values must be known at
81
+ initialization: transform() doesn't know how to bin unknown categorical
82
+ values. Note that transform() is only used on non-training data in the
83
+ case of early stopping.
84
+
85
+ Features with a small number of values may be binned into less than
86
+ ``n_bins`` bins. The last bin (at index ``n_bins - 1``) is always reserved
87
+ for missing values.
88
+
89
+ Parameters
90
+ ----------
91
+ n_bins : int, default=256
92
+ The maximum number of bins to use (including the bin for missing
93
+ values). Should be in [3, 256]. Non-missing values are binned on
94
+ ``max_bins = n_bins - 1`` bins. The last bin is always reserved for
95
+ missing values. If for a given feature the number of unique values is
96
+ less than ``max_bins``, then those unique values will be used to
97
+ compute the bin thresholds, instead of the quantiles. For categorical
98
+ features indicated by ``is_categorical``, see the docstring of
99
+ ``is_categorical`` for details on this procedure.
100
+ subsample : int or None, default=2e5
101
+ If ``n_samples > subsample``, then ``subsample`` samples will be
102
+ randomly chosen to compute the quantiles. If ``None``, the whole data
103
+ is used.
104
+ is_categorical : ndarray of bool of shape (n_features,), default=None
105
+ Indicates categorical features. By default, all features are
106
+ considered continuous.
107
+ known_categories : list of {ndarray, None} of shape (n_features,), \
108
+ default=None
109
+ For each categorical feature, the array indicates the set of unique
110
+ categorical values. These should be the possible values over all the
111
+ data, not just the training data. For continuous features, the
112
+ corresponding entry should be None.
113
+ random_state : int, RandomState instance or None, default=None
114
+ Pseudo-random number generator to control the random sub-sampling.
115
+ Pass an int for reproducible output across multiple
116
+ function calls.
117
+ See :term:`Glossary <random_state>`.
118
+ n_threads : int, default=None
119
+ Number of OpenMP threads to use. `_openmp_effective_n_threads` is called
120
+ to determine the effective number of threads to use, which takes cgroups CPU
121
+ quotas into account. See the docstring of `_openmp_effective_n_threads`
122
+ for details.
123
+
124
+ Attributes
125
+ ----------
126
+ bin_thresholds_ : list of ndarray
127
+ For each feature, each array indicates how to map a feature into a
128
+ binned feature. The semantic and size depends on the nature of the
129
+ feature:
130
+ - for real-valued features, the array corresponds to the real-valued
131
+ bin thresholds (the upper bound of each bin). There are ``max_bins
132
+ - 1`` thresholds, where ``max_bins = n_bins - 1`` is the number of
133
+ bins used for non-missing values.
134
+ - for categorical features, the array is a map from a binned category
135
+ value to the raw category value. The size of the array is equal to
136
+ ``min(max_bins, category_cardinality)`` where we ignore missing
137
+ values in the cardinality.
138
+ n_bins_non_missing_ : ndarray, dtype=np.uint32
139
+ For each feature, gives the number of bins actually used for
140
+ non-missing values. For features with a lot of unique values, this is
141
+ equal to ``n_bins - 1``.
142
+ is_categorical_ : ndarray of shape (n_features,), dtype=np.uint8
143
+ Indicator for categorical features.
144
+ missing_values_bin_idx_ : np.uint8
145
+ The index of the bin where missing values are mapped. This is a
146
+ constant across all features. This corresponds to the last bin, and
147
+ it is always equal to ``n_bins - 1``. Note that if ``n_bins_non_missing_``
148
+ is less than ``n_bins - 1`` for a given feature, then there are
149
+ empty (and unused) bins.
150
+ """
151
+
152
+ def __init__(
153
+ self,
154
+ n_bins=256,
155
+ subsample=int(2e5),
156
+ is_categorical=None,
157
+ known_categories=None,
158
+ random_state=None,
159
+ n_threads=None,
160
+ ):
161
+ self.n_bins = n_bins
162
+ self.subsample = subsample
163
+ self.is_categorical = is_categorical
164
+ self.known_categories = known_categories
165
+ self.random_state = random_state
166
+ self.n_threads = n_threads
167
+
168
+ def fit(self, X, y=None):
169
+ """Fit data X by computing the binning thresholds.
170
+
171
+ The last bin is reserved for missing values, whether missing values
172
+ are present in the data or not.
173
+
174
+ Parameters
175
+ ----------
176
+ X : array-like of shape (n_samples, n_features)
177
+ The data to bin.
178
+ y : None
179
+ Ignored.
180
+
181
+ Returns
182
+ -------
183
+ self : object
184
+ """
185
+ if not (3 <= self.n_bins <= 256):
186
+ # min is 3: at least 2 distinct bins and a missing values bin
187
+ raise ValueError(
188
+ "n_bins={} should be no smaller than 3 and no larger than 256.".format(
189
+ self.n_bins
190
+ )
191
+ )
192
+
193
+ X = check_array(X, dtype=[X_DTYPE], force_all_finite=False)
194
+ max_bins = self.n_bins - 1
195
+
196
+ rng = check_random_state(self.random_state)
197
+ if self.subsample is not None and X.shape[0] > self.subsample:
198
+ subset = rng.choice(X.shape[0], self.subsample, replace=False)
199
+ X = X.take(subset, axis=0)
200
+
201
+ if self.is_categorical is None:
202
+ self.is_categorical_ = np.zeros(X.shape[1], dtype=np.uint8)
203
+ else:
204
+ self.is_categorical_ = np.asarray(self.is_categorical, dtype=np.uint8)
205
+
206
+ n_features = X.shape[1]
207
+ known_categories = self.known_categories
208
+ if known_categories is None:
209
+ known_categories = [None] * n_features
210
+
211
+ # validate is_categorical and known_categories parameters
212
+ for f_idx in range(n_features):
213
+ is_categorical = self.is_categorical_[f_idx]
214
+ known_cats = known_categories[f_idx]
215
+ if is_categorical and known_cats is None:
216
+ raise ValueError(
217
+ f"Known categories for feature {f_idx} must be provided."
218
+ )
219
+ if not is_categorical and known_cats is not None:
220
+ raise ValueError(
221
+ f"Feature {f_idx} isn't marked as a categorical feature, "
222
+ "but categories were passed."
223
+ )
224
+
225
+ self.missing_values_bin_idx_ = self.n_bins - 1
226
+
227
+ self.bin_thresholds_ = []
228
+ n_bins_non_missing = []
229
+
230
+ for f_idx in range(n_features):
231
+ if not self.is_categorical_[f_idx]:
232
+ thresholds = _find_binning_thresholds(X[:, f_idx], max_bins)
233
+ n_bins_non_missing.append(thresholds.shape[0] + 1)
234
+ else:
235
+ # Since categories are assumed to be encoded in
236
+ # [0, n_cats] and since n_cats <= max_bins,
237
+ # the thresholds *are* the unique categorical values. This will
238
+ # lead to the correct mapping in transform()
239
+ thresholds = known_categories[f_idx]
240
+ n_bins_non_missing.append(thresholds.shape[0])
241
+
242
+ self.bin_thresholds_.append(thresholds)
243
+
244
+ self.n_bins_non_missing_ = np.array(n_bins_non_missing, dtype=np.uint32)
245
+ return self
246
+
247
+ def transform(self, X):
248
+ """Bin data X.
249
+
250
+ Missing values will be mapped to the last bin.
251
+
252
+ For categorical features, the mapping will be incorrect for unknown
253
+ categories. Since the BinMapper is given known_categories of the
254
+ entire training data (i.e. before the call to train_test_split() in
255
+ case of early-stopping), this never happens.
256
+
257
+ Parameters
258
+ ----------
259
+ X : array-like of shape (n_samples, n_features)
260
+ The data to bin.
261
+
262
+ Returns
263
+ -------
264
+ X_binned : array-like of shape (n_samples, n_features)
265
+ The binned data (fortran-aligned).
266
+ """
267
+ X = check_array(X, dtype=[X_DTYPE], force_all_finite=False)
268
+ check_is_fitted(self)
269
+ if X.shape[1] != self.n_bins_non_missing_.shape[0]:
270
+ raise ValueError(
271
+ "This estimator was fitted with {} features but {} got passed "
272
+ "to transform()".format(self.n_bins_non_missing_.shape[0], X.shape[1])
273
+ )
274
+
275
+ n_threads = _openmp_effective_n_threads(self.n_threads)
276
+ binned = np.zeros_like(X, dtype=X_BINNED_DTYPE, order="F")
277
+ _map_to_bins(
278
+ X,
279
+ self.bin_thresholds_,
280
+ self.is_categorical_,
281
+ self.missing_values_bin_idx_,
282
+ n_threads,
283
+ binned,
284
+ )
285
+ return binned
286
+
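A usage sketch of this helper (editorial addition; ``_BinMapper`` is private API that may change without notice, shown here only to illustrate the fit/transform behaviour documented above): continuous values are mapped to small integer bin codes, with the last bin index reserved for missing values.

import numpy as np
from sklearn.ensemble._hist_gradient_boosting.binning import _BinMapper

X = np.array([[0.1], [0.2], [0.3], [np.nan]])
mapper = _BinMapper(n_bins=4).fit(X)   # 3 bins for values + 1 for missing
X_binned = mapper.transform(X)         # uint8 codes, Fortran-ordered
assert X_binned[-1, 0] == mapper.missing_values_bin_idx_ == 3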
287
+ def make_known_categories_bitsets(self):
288
+ """Create bitsets of known categories.
289
+
290
+ Returns
291
+ -------
292
+ - known_cat_bitsets : ndarray of shape (n_categorical_features, 8)
293
+ Array of bitsets of known categories, for each categorical feature.
294
+ - f_idx_map : ndarray of shape (n_features,)
295
+ Map from original feature index to the corresponding index in the
296
+ known_cat_bitsets array.
297
+ """
298
+
299
+ categorical_features_indices = np.flatnonzero(self.is_categorical_)
300
+
301
+ n_features = self.is_categorical_.size
302
+ n_categorical_features = categorical_features_indices.size
303
+
304
+ f_idx_map = np.zeros(n_features, dtype=np.uint32)
305
+ f_idx_map[categorical_features_indices] = np.arange(
306
+ n_categorical_features, dtype=np.uint32
307
+ )
308
+
309
+ known_categories = self.bin_thresholds_
310
+
311
+ known_cat_bitsets = np.zeros(
312
+ (n_categorical_features, 8), dtype=X_BITSET_INNER_DTYPE
313
+ )
314
+
315
+ # TODO: complexity is O(n_categorical_features * 255). Maybe this is
316
+ # worth cythonizing
317
+ for mapped_f_idx, f_idx in enumerate(categorical_features_indices):
318
+ for raw_cat_val in known_categories[f_idx]:
319
+ set_bitset_memoryview(known_cat_bitsets[mapped_f_idx], raw_cat_val)
320
+
321
+ return known_cat_bitsets, f_idx_map
llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/common.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (146 kB).
 
llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/common.pxd ADDED
@@ -0,0 +1,44 @@
1
+ cimport numpy as cnp
2
+ from sklearn.utils._typedefs cimport intp_t
3
+
4
+ cnp.import_array()
5
+
6
+
7
+ ctypedef cnp.npy_float64 X_DTYPE_C
8
+ ctypedef cnp.npy_uint8 X_BINNED_DTYPE_C
9
+ ctypedef cnp.npy_float64 Y_DTYPE_C
10
+ ctypedef cnp.npy_float32 G_H_DTYPE_C
11
+ ctypedef cnp.npy_uint32 BITSET_INNER_DTYPE_C
12
+ ctypedef BITSET_INNER_DTYPE_C[8] BITSET_DTYPE_C
13
+
14
+ cdef packed struct hist_struct:
15
+ # Same as histogram dtype but we need a struct to declare views. It needs
16
+ # to be packed since by default numpy dtypes aren't aligned
17
+ Y_DTYPE_C sum_gradients
18
+ Y_DTYPE_C sum_hessians
19
+ unsigned int count
20
+
21
+
22
+ cdef packed struct node_struct:
23
+ # Equivalent struct to PREDICTOR_RECORD_DTYPE to use in memory views. It
24
+ # needs to be packed since by default numpy dtypes aren't aligned
25
+ Y_DTYPE_C value
26
+ unsigned int count
27
+ intp_t feature_idx
28
+ X_DTYPE_C num_threshold
29
+ unsigned char missing_go_to_left
30
+ unsigned int left
31
+ unsigned int right
32
+ Y_DTYPE_C gain
33
+ unsigned int depth
34
+ unsigned char is_leaf
35
+ X_BINNED_DTYPE_C bin_threshold
36
+ unsigned char is_categorical
37
+ # The index of the corresponding bitsets in the Predictor's bitset arrays.
38
+ # Only used if is_categorical is True
39
+ unsigned int bitset_idx
40
+
41
+ cpdef enum MonotonicConstraint:
42
+ NO_CST = 0
43
+ POS = 1
44
+ NEG = -1
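For orientation (editorial addition; a hedged sketch rather than the library's actual definition), the packed ``hist_struct`` above corresponds to a NumPy structured dtype along the following lines, with one record per (feature, bin):

import numpy as np

HIST_DTYPE_SKETCH = np.dtype([
    ("sum_gradients", np.float64),   # Y_DTYPE_C
    ("sum_hessians", np.float64),    # Y_DTYPE_C
    ("count", np.uint32),            # unsigned int
])
histograms = np.zeros((1, 255), dtype=HIST_DTYPE_SKETCH)  # (n_features, n_bins)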
llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/gradient_boosting.py ADDED
@@ -0,0 +1,2270 @@
1
+ """Fast Gradient Boosting decision trees for classification and regression."""
2
+
3
+ # Author: Nicolas Hug
4
+
5
+ import itertools
6
+ import warnings
7
+ from abc import ABC, abstractmethod
8
+ from contextlib import contextmanager, nullcontext, suppress
9
+ from functools import partial
10
+ from numbers import Integral, Real
11
+ from time import time
12
+
13
+ import numpy as np
14
+
15
+ from ..._loss.loss import (
16
+ _LOSSES,
17
+ BaseLoss,
18
+ HalfBinomialLoss,
19
+ HalfGammaLoss,
20
+ HalfMultinomialLoss,
21
+ HalfPoissonLoss,
22
+ PinballLoss,
23
+ )
24
+ from ...base import (
25
+ BaseEstimator,
26
+ ClassifierMixin,
27
+ RegressorMixin,
28
+ _fit_context,
29
+ is_classifier,
30
+ )
31
+ from ...compose import ColumnTransformer
32
+ from ...metrics import check_scoring
33
+ from ...metrics._scorer import _SCORERS
34
+ from ...model_selection import train_test_split
35
+ from ...preprocessing import FunctionTransformer, LabelEncoder, OrdinalEncoder
36
+ from ...utils import check_random_state, compute_sample_weight, is_scalar_nan, resample
37
+ from ...utils._openmp_helpers import _openmp_effective_n_threads
38
+ from ...utils._param_validation import Hidden, Interval, RealNotInt, StrOptions
39
+ from ...utils.multiclass import check_classification_targets
40
+ from ...utils.validation import (
41
+ _check_monotonic_cst,
42
+ _check_sample_weight,
43
+ _check_y,
44
+ _is_pandas_df,
45
+ check_array,
46
+ check_consistent_length,
47
+ check_is_fitted,
48
+ )
49
+ from ._gradient_boosting import _update_raw_predictions
50
+ from .binning import _BinMapper
51
+ from .common import G_H_DTYPE, X_DTYPE, Y_DTYPE
52
+ from .grower import TreeGrower
53
+
54
+ _LOSSES = _LOSSES.copy()
55
+ _LOSSES.update(
56
+ {
57
+ "poisson": HalfPoissonLoss,
58
+ "gamma": HalfGammaLoss,
59
+ "quantile": PinballLoss,
60
+ }
61
+ )
62
+
63
+
64
+ def _update_leaves_values(loss, grower, y_true, raw_prediction, sample_weight):
65
+ """Update the leaf values to be predicted by the tree.
66
+
67
+ Update equals:
68
+ loss.fit_intercept_only(y_true - raw_prediction)
69
+
70
+ This is only applied if loss.differentiable is False.
71
+ Note: It only works, if the loss is a function of the residual, as is the
72
+ case for AbsoluteError and PinballLoss. Otherwise, one would need to get
73
+ the minimum of loss(y_true, raw_prediction + x) in x. A few examples:
74
+ - AbsoluteError: median(y_true - raw_prediction).
75
+ - PinballLoss: quantile(y_true - raw_prediction).
76
+
77
+ More background:
78
+ For the standard gradient descent method according to "Greedy Function
79
+ Approximation: A Gradient Boosting Machine" by Friedman, all loss functions but the
80
+ squared loss need a line search step. BaseHistGradientBoosting, however, implements
81
+ a so-called Newton boosting where the trees are fitted to a 2nd order
82
+ approximation of the loss in terms of gradients and hessians. In this case, the
83
+ line search step is only necessary if the loss is not smooth, i.e. not
84
+ differentiable, which renders the 2nd order approximation invalid. In fact,
85
+ non-smooth losses arbitrarily set hessians to 1 and effectively use the standard
86
+ gradient descent method with line search.
87
+ """
88
+ # TODO: Ideally this should be computed in parallel over the leaves using something
89
+ # similar to _update_raw_predictions(), but this requires a cython version of
90
+ # median().
91
+ for leaf in grower.finalized_leaves:
92
+ indices = leaf.sample_indices
93
+ if sample_weight is None:
94
+ sw = None
95
+ else:
96
+ sw = sample_weight[indices]
97
+ update = loss.fit_intercept_only(
98
+ y_true=y_true[indices] - raw_prediction[indices],
99
+ sample_weight=sw,
100
+ )
101
+ leaf.value = grower.shrinkage * update
102
+ # Note that the regularization is ignored here
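+ # Illustrative sketch (assuming an unweighted AbsoluteError loss): the update
+ # computed above for a leaf reduces to
+ #     residuals = y_true[leaf.sample_indices] - raw_prediction[leaf.sample_indices]
+ #     leaf.value = grower.shrinkage * np.median(residuals)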
103
+
104
+
105
+ @contextmanager
106
+ def _patch_raw_predict(estimator, raw_predictions):
107
+ """Context manager that patches _raw_predict to return raw_predictions.
108
+
109
+ `raw_predictions` is typically a precomputed array to avoid redundant
110
+ stage-wise computations when fitting with early stopping enabled: in this case
111
+ `raw_predictions` is incrementally updated whenever we add a tree to the
112
+ boosted ensemble.
113
+
114
+ Note: this makes fitting HistGradientBoosting* models inherently non thread
115
+ safe at fit time. However thread-safety at fit time was never guaranteed nor
116
+ enforced for scikit-learn estimators in general.
117
+
118
+ Thread-safety at prediction/transform time is another matter as those
119
+ operations are typically side-effect free and therefore often thread-safe by
120
+ default for most scikit-learn models, and we would like to keep it that way.
121
+ Therefore this context manager should only be used at fit time.
122
+
123
+ TODO: in the future, we could explore the possibility to extend the scorer
124
+ public API to expose a way to compute values from raw predictions. That would
125
+ probably require also making the scorer aware of the inverse link function
126
+ used by the estimator which is typically private API for now, hence the need
127
+ for this patching mechanism.
128
+ """
129
+ orig_raw_predict = estimator._raw_predict
130
+
131
+ def _patched_raw_predicts(*args, **kwargs):
132
+ return raw_predictions
133
+
134
+ estimator._raw_predict = _patched_raw_predicts
135
+ yield estimator
136
+ estimator._raw_predict = orig_raw_predict
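+ # Usage sketch (illustrative; `clf`, `X_val`, `y_val` and `raw_predictions_val`
+ # are placeholder names): score against precomputed raw predictions without
+ # re-traversing the trees:
+ #     with _patch_raw_predict(clf, raw_predictions_val):
+ #         score = scorer(clf, X_val, y_val)
+ # This mirrors how _score_with_raw_predictions uses the context manager below.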
137
+
138
+
139
+ class BaseHistGradientBoosting(BaseEstimator, ABC):
140
+ """Base class for histogram-based gradient boosting estimators."""
141
+
142
+ _parameter_constraints: dict = {
143
+ "loss": [BaseLoss],
144
+ "learning_rate": [Interval(Real, 0, None, closed="neither")],
145
+ "max_iter": [Interval(Integral, 1, None, closed="left")],
146
+ "max_leaf_nodes": [Interval(Integral, 2, None, closed="left"), None],
147
+ "max_depth": [Interval(Integral, 1, None, closed="left"), None],
148
+ "min_samples_leaf": [Interval(Integral, 1, None, closed="left")],
149
+ "l2_regularization": [Interval(Real, 0, None, closed="left")],
150
+ "max_features": [Interval(RealNotInt, 0, 1, closed="right")],
151
+ "monotonic_cst": ["array-like", dict, None],
152
+ "interaction_cst": [
153
+ list,
154
+ tuple,
155
+ StrOptions({"pairwise", "no_interactions"}),
156
+ None,
157
+ ],
158
+ "n_iter_no_change": [Interval(Integral, 1, None, closed="left")],
159
+ "validation_fraction": [
160
+ Interval(RealNotInt, 0, 1, closed="neither"),
161
+ Interval(Integral, 1, None, closed="left"),
162
+ None,
163
+ ],
164
+ "tol": [Interval(Real, 0, None, closed="left")],
165
+ "max_bins": [Interval(Integral, 2, 255, closed="both")],
166
+ "categorical_features": [
167
+ "array-like",
168
+ StrOptions({"from_dtype"}),
169
+ Hidden(StrOptions({"warn"})),
170
+ None,
171
+ ],
172
+ "warm_start": ["boolean"],
173
+ "early_stopping": [StrOptions({"auto"}), "boolean"],
174
+ "scoring": [str, callable, None],
175
+ "verbose": ["verbose"],
176
+ "random_state": ["random_state"],
177
+ }
178
+
179
+ @abstractmethod
180
+ def __init__(
181
+ self,
182
+ loss,
183
+ *,
184
+ learning_rate,
185
+ max_iter,
186
+ max_leaf_nodes,
187
+ max_depth,
188
+ min_samples_leaf,
189
+ l2_regularization,
190
+ max_features,
191
+ max_bins,
192
+ categorical_features,
193
+ monotonic_cst,
194
+ interaction_cst,
195
+ warm_start,
196
+ early_stopping,
197
+ scoring,
198
+ validation_fraction,
199
+ n_iter_no_change,
200
+ tol,
201
+ verbose,
202
+ random_state,
203
+ ):
204
+ self.loss = loss
205
+ self.learning_rate = learning_rate
206
+ self.max_iter = max_iter
207
+ self.max_leaf_nodes = max_leaf_nodes
208
+ self.max_depth = max_depth
209
+ self.min_samples_leaf = min_samples_leaf
210
+ self.l2_regularization = l2_regularization
211
+ self.max_features = max_features
212
+ self.max_bins = max_bins
213
+ self.monotonic_cst = monotonic_cst
214
+ self.interaction_cst = interaction_cst
215
+ self.categorical_features = categorical_features
216
+ self.warm_start = warm_start
217
+ self.early_stopping = early_stopping
218
+ self.scoring = scoring
219
+ self.validation_fraction = validation_fraction
220
+ self.n_iter_no_change = n_iter_no_change
221
+ self.tol = tol
222
+ self.verbose = verbose
223
+ self.random_state = random_state
224
+
225
+ def _validate_parameters(self):
226
+ """Validate parameters passed to __init__.
227
+
228
+ The parameters that are directly passed to the grower are checked in
229
+ TreeGrower."""
230
+ if self.monotonic_cst is not None and self.n_trees_per_iteration_ != 1:
231
+ raise ValueError(
232
+ "monotonic constraints are not supported for multiclass classification."
233
+ )
234
+
235
+ def _finalize_sample_weight(self, sample_weight, y):
236
+ """Finalize sample weight.
237
+
238
+ Used by subclasses to adjust sample_weights. This is useful for implementing
239
+ class weights.
240
+ """
241
+ return sample_weight
242
+
243
+ def _preprocess_X(self, X, *, reset):
244
+ """Preprocess and validate X.
245
+
246
+ Parameters
247
+ ----------
248
+ X : {array-like, pandas DataFrame} of shape (n_samples, n_features)
249
+ Input data.
250
+
251
+ reset : bool
252
+ Whether to reset the `n_features_in_` and `feature_names_in_` attributes.
253
+
254
+ Returns
255
+ -------
256
+ X : ndarray of shape (n_samples, n_features)
257
+ Validated input data.
258
+
259
+ known_categories : list of ndarray of shape (n_categories,)
260
+ List of known categories for each categorical feature.
261
+ """
262
+ # If there is a preprocessor, we let the preprocessor handle the validation.
263
+ # Otherwise, we validate the data ourselves.
264
+ check_X_kwargs = dict(dtype=[X_DTYPE], force_all_finite=False)
265
+ if not reset:
266
+ if self._preprocessor is None:
267
+ return self._validate_data(X, reset=False, **check_X_kwargs)
268
+ return self._preprocessor.transform(X)
269
+
270
+ # At this point, reset is True, which happens during `fit`.
271
+ self.is_categorical_ = self._check_categorical_features(X)
272
+
273
+ if self.is_categorical_ is None:
274
+ self._preprocessor = None
275
+ self._is_categorical_remapped = None
276
+
277
+ X = self._validate_data(X, **check_X_kwargs)
278
+ return X, None
279
+
280
+ n_features = X.shape[1]
281
+ ordinal_encoder = OrdinalEncoder(
282
+ categories="auto",
283
+ handle_unknown="use_encoded_value",
284
+ unknown_value=np.nan,
285
+ encoded_missing_value=np.nan,
286
+ dtype=X_DTYPE,
287
+ )
288
+
289
+ check_X = partial(check_array, **check_X_kwargs)
290
+ numerical_preprocessor = FunctionTransformer(check_X)
291
+ self._preprocessor = ColumnTransformer(
292
+ [
293
+ ("encoder", ordinal_encoder, self.is_categorical_),
294
+ ("numerical", numerical_preprocessor, ~self.is_categorical_),
295
+ ]
296
+ )
297
+ self._preprocessor.set_output(transform="default")
298
+ X = self._preprocessor.fit_transform(X)
299
+ # check categories found by the OrdinalEncoder and get their encoded values
300
+ known_categories = self._check_categories()
301
+ self.n_features_in_ = self._preprocessor.n_features_in_
302
+ with suppress(AttributeError):
303
+ self.feature_names_in_ = self._preprocessor.feature_names_in_
304
+
305
+ # The ColumnTransformer's output places the categorical features at the
306
+ # beginning
307
+ categorical_remapped = np.zeros(n_features, dtype=bool)
308
+ categorical_remapped[self._preprocessor.output_indices_["encoder"]] = True
309
+ self._is_categorical_remapped = categorical_remapped
310
+
311
+ return X, known_categories
312
+
313
+ def _check_categories(self):
314
+ """Check categories found by the preprocessor and return their encoded values.
315
+
316
+ Returns a list of length ``self.n_features_in_``, with one entry per
317
+ input feature.
318
+
319
+ For non-categorical features, the corresponding entry is ``None``.
320
+
321
+ For categorical features, the corresponding entry is an array
322
+ containing the categories as encoded by the preprocessor (an
323
+ ``OrdinalEncoder``), excluding missing values. The entry is therefore
324
+ ``np.arange(n_categories)`` where ``n_categories`` is the number of
325
+ unique values in the considered feature column, after removing missing
326
+ values.
327
+
328
+ If ``n_categories > self.max_bins`` for any feature, a ``ValueError``
329
+ is raised.
330
+ """
331
+ encoder = self._preprocessor.named_transformers_["encoder"]
332
+ known_categories = [None] * self._preprocessor.n_features_in_
333
+ categorical_column_indices = np.arange(self._preprocessor.n_features_in_)[
334
+ self._preprocessor.output_indices_["encoder"]
335
+ ]
336
+ for feature_idx, categories in zip(
337
+ categorical_column_indices, encoder.categories_
338
+ ):
339
+ # OrdinalEncoder always puts np.nan as the last category if the
340
+ # training data has missing values. Here we remove it because it is
341
+ # already added by the _BinMapper.
342
+ if len(categories) and is_scalar_nan(categories[-1]):
343
+ categories = categories[:-1]
344
+ if categories.size > self.max_bins:
345
+ try:
346
+ feature_name = repr(encoder.feature_names_in_[feature_idx])
347
+ except AttributeError:
348
+ feature_name = f"at index {feature_idx}"
349
+ raise ValueError(
350
+ f"Categorical feature {feature_name} is expected to "
351
+ f"have a cardinality <= {self.max_bins} but actually "
352
+ f"has a cardinality of {categories.size}."
353
+ )
354
+ known_categories[feature_idx] = np.arange(len(categories), dtype=X_DTYPE)
355
+ return known_categories
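+ # Example (illustrative): a categorical column with three raw categories such
+ # as ["a", "b", "c"] yields the known_categories entry array([0., 1., 2.]),
+ # i.e. the OrdinalEncoder codes rather than the raw values.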
356
+
357
+ def _check_categorical_features(self, X):
358
+ """Check and validate categorical features in X
359
+
360
+ Parameters
361
+ ----------
362
+ X : {array-like, pandas DataFrame} of shape (n_samples, n_features)
363
+ Input data.
364
+
365
+ Return
366
+ ------
367
+ is_categorical : ndarray of shape (n_features,) or None, dtype=bool
368
+ Indicates whether a feature is categorical. If no feature is
369
+ categorical, this is None.
370
+ """
371
+ # Special code for pandas because of a bug in recent pandas, which is
372
+ # fixed in main and maybe included in 2.2.1, see
373
+ # https://github.com/pandas-dev/pandas/pull/57173.
374
+ # Also pandas versions < 1.5.1 do not support the dataframe interchange protocol.
375
+ if _is_pandas_df(X):
376
+ X_is_dataframe = True
377
+ categorical_columns_mask = np.asarray(X.dtypes == "category")
378
+ X_has_categorical_columns = categorical_columns_mask.any()
379
+ elif hasattr(X, "__dataframe__"):
380
+ X_is_dataframe = True
381
+ categorical_columns_mask = np.asarray(
382
+ [
383
+ c.dtype[0].name == "CATEGORICAL"
384
+ for c in X.__dataframe__().get_columns()
385
+ ]
386
+ )
387
+ X_has_categorical_columns = categorical_columns_mask.any()
388
+ else:
389
+ X_is_dataframe = False
390
+ categorical_columns_mask = None
391
+ X_has_categorical_columns = False
392
+
393
+ # TODO(1.6): Remove warning and change default to "from_dtype" in v1.6
394
+ if (
395
+ isinstance(self.categorical_features, str)
396
+ and self.categorical_features == "warn"
397
+ ):
398
+ if X_has_categorical_columns:
399
+ warnings.warn(
400
+ (
401
+ "The categorical_features parameter will change to 'from_dtype'"
402
+ " in v1.6. The 'from_dtype' option automatically treats"
403
+ " categorical dtypes in a DataFrame as categorical features."
404
+ ),
405
+ FutureWarning,
406
+ )
407
+ categorical_features = None
408
+ else:
409
+ categorical_features = self.categorical_features
410
+
411
+ categorical_by_dtype = (
412
+ isinstance(categorical_features, str)
413
+ and categorical_features == "from_dtype"
414
+ )
415
+ no_categorical_dtype = categorical_features is None or (
416
+ categorical_by_dtype and not X_is_dataframe
417
+ )
418
+
419
+ if no_categorical_dtype:
420
+ return None
421
+
422
+ use_pandas_categorical = categorical_by_dtype and X_is_dataframe
423
+ if use_pandas_categorical:
424
+ categorical_features = categorical_columns_mask
425
+ else:
426
+ categorical_features = np.asarray(categorical_features)
427
+
428
+ if categorical_features.size == 0:
429
+ return None
430
+
431
+ if categorical_features.dtype.kind not in ("i", "b", "U", "O"):
432
+ raise ValueError(
433
+ "categorical_features must be an array-like of bool, int or "
434
+ f"str, got: {categorical_features.dtype.name}."
435
+ )
436
+
437
+ if categorical_features.dtype.kind == "O":
438
+ types = set(type(f) for f in categorical_features)
439
+ if types != {str}:
440
+ raise ValueError(
441
+ "categorical_features must be an array-like of bool, int or "
442
+ f"str, got: {', '.join(sorted(t.__name__ for t in types))}."
443
+ )
444
+
445
+ n_features = X.shape[1]
446
+ # At this point `_validate_data` was not called yet because we want to use the
447
+ # dtypes to discover the categorical features. Thus `feature_names_in_`
448
+ # is not defined yet.
449
+ feature_names_in_ = getattr(X, "columns", None)
450
+
451
+ if categorical_features.dtype.kind in ("U", "O"):
452
+ # check for feature names
453
+ if feature_names_in_ is None:
454
+ raise ValueError(
455
+ "categorical_features should be passed as an array of "
456
+ "integers or as a boolean mask when the model is fitted "
457
+ "on data without feature names."
458
+ )
459
+ is_categorical = np.zeros(n_features, dtype=bool)
460
+ feature_names = list(feature_names_in_)
461
+ for feature_name in categorical_features:
462
+ try:
463
+ is_categorical[feature_names.index(feature_name)] = True
464
+ except ValueError as e:
465
+ raise ValueError(
466
+ f"categorical_features has a item value '{feature_name}' "
467
+ "which is not a valid feature name of the training "
468
+ f"data. Observed feature names: {feature_names}"
469
+ ) from e
470
+ elif categorical_features.dtype.kind == "i":
471
+ # check for categorical features as indices
472
+ if (
473
+ np.max(categorical_features) >= n_features
474
+ or np.min(categorical_features) < 0
475
+ ):
476
+ raise ValueError(
477
+ "categorical_features set as integer "
478
+ "indices must be in [0, n_features - 1]"
479
+ )
480
+ is_categorical = np.zeros(n_features, dtype=bool)
481
+ is_categorical[categorical_features] = True
482
+ else:
483
+ if categorical_features.shape[0] != n_features:
484
+ raise ValueError(
485
+ "categorical_features set as a boolean mask "
486
+ "must have shape (n_features,), got: "
487
+ f"{categorical_features.shape}"
488
+ )
489
+ is_categorical = categorical_features
490
+
491
+ if not np.any(is_categorical):
492
+ return None
493
+ return is_categorical
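+ # Example (illustrative, hypothetical column names): on a DataFrame with
+ # columns ["age", "color"] and categorical_features=["color"], this returns
+ # array([False, True]); passing [1] or [False, True] gives the same mask.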
494
+
495
+ def _check_interaction_cst(self, n_features):
496
+ """Check and validation for interaction constraints."""
497
+ if self.interaction_cst is None:
498
+ return None
499
+
500
+ if self.interaction_cst == "no_interactions":
501
+ interaction_cst = [[i] for i in range(n_features)]
502
+ elif self.interaction_cst == "pairwise":
503
+ interaction_cst = itertools.combinations(range(n_features), 2)
504
+ else:
505
+ interaction_cst = self.interaction_cst
506
+
507
+ try:
508
+ constraints = [set(group) for group in interaction_cst]
509
+ except TypeError:
510
+ raise ValueError(
511
+ "Interaction constraints must be a sequence of tuples or lists, got:"
512
+ f" {self.interaction_cst!r}."
513
+ )
514
+
515
+ for group in constraints:
516
+ for x in group:
517
+ if not (isinstance(x, Integral) and 0 <= x < n_features):
518
+ raise ValueError(
519
+ "Interaction constraints must consist of integer indices in"
520
+ f" [0, n_features - 1] = [0, {n_features - 1}], specifying the"
521
+ " position of features, got invalid indices:"
522
+ f" {group!r}"
523
+ )
524
+
525
+ # Add all not listed features as own group by default.
526
+ rest = set(range(n_features)) - set().union(*constraints)
527
+ if len(rest) > 0:
528
+ constraints.append(rest)
529
+
530
+ return constraints
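+ # Worked example (matching the estimator docstring): with n_features=5 and
+ # interaction_cst=[{0, 1}], the unlisted features form their own group and the
+ # returned constraints are [{0, 1}, {2, 3, 4}].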
531
+
532
+ @_fit_context(prefer_skip_nested_validation=True)
533
+ def fit(self, X, y, sample_weight=None):
534
+ """Fit the gradient boosting model.
535
+
536
+ Parameters
537
+ ----------
538
+ X : array-like of shape (n_samples, n_features)
539
+ The input samples.
540
+
541
+ y : array-like of shape (n_samples,)
542
+ Target values.
543
+
544
+ sample_weight : array-like of shape (n_samples,), default=None
545
+ Weights of training data.
546
+
547
+ .. versionadded:: 0.23
548
+
549
+ Returns
550
+ -------
551
+ self : object
552
+ Fitted estimator.
553
+ """
554
+ fit_start_time = time()
555
+ acc_find_split_time = 0.0 # time spent finding the best splits
556
+ acc_apply_split_time = 0.0 # time spent splitting nodes
557
+ acc_compute_hist_time = 0.0 # time spent computing histograms
558
+ # time spent predicting X for gradient and hessians update
559
+ acc_prediction_time = 0.0
560
+ X, known_categories = self._preprocess_X(X, reset=True)
561
+ y = _check_y(y, estimator=self)
562
+ y = self._encode_y(y)
563
+ check_consistent_length(X, y)
564
+ # Do not create unit sample weights by default to later skip some
565
+ # computation
566
+ if sample_weight is not None:
567
+ sample_weight = _check_sample_weight(sample_weight, X, dtype=np.float64)
568
+ # TODO: remove when PDP supports sample weights
569
+ self._fitted_with_sw = True
570
+
571
+ sample_weight = self._finalize_sample_weight(sample_weight, y)
572
+
573
+ rng = check_random_state(self.random_state)
574
+
575
+ # When warm starting, we want to reuse the same seed that was used
576
+ # the first time fit was called (e.g. train/val split).
577
+ # For feature subsampling, we want to continue with the rng we started with.
578
+ if not self.warm_start or not self._is_fitted():
579
+ self._random_seed = rng.randint(np.iinfo(np.uint32).max, dtype="u8")
580
+ feature_subsample_seed = rng.randint(np.iinfo(np.uint32).max, dtype="u8")
581
+ self._feature_subsample_rng = np.random.default_rng(feature_subsample_seed)
582
+
583
+ self._validate_parameters()
584
+ monotonic_cst = _check_monotonic_cst(self, self.monotonic_cst)
585
+
586
+ # used for validation in predict
587
+ n_samples, self._n_features = X.shape
588
+
589
+ # Encode constraints into a list of sets of features indices (integers).
590
+ interaction_cst = self._check_interaction_cst(self._n_features)
591
+
592
+ # we need this stateful variable to tell raw_predict() that it was
593
+ # called from fit() (this current method), and that the data it has
594
+ # received is pre-binned.
595
+ # predicting is faster on pre-binned data, so we want early stopping
596
+ # predictions to be made on pre-binned data. Unfortunately the _scorer
597
+ # can only call predict() or predict_proba(), not raw_predict(), and
598
+ # there's no way to tell the scorer that it needs to predict binned
599
+ # data.
600
+ self._in_fit = True
601
+
602
+ # `_openmp_effective_n_threads` is used to take cgroups CPU quotas
603
+ # into account when determining the maximum number of threads to use.
604
+ n_threads = _openmp_effective_n_threads()
605
+
606
+ if isinstance(self.loss, str):
607
+ self._loss = self._get_loss(sample_weight=sample_weight)
608
+ elif isinstance(self.loss, BaseLoss):
609
+ self._loss = self.loss
610
+
611
+ if self.early_stopping == "auto":
612
+ self.do_early_stopping_ = n_samples > 10000
613
+ else:
614
+ self.do_early_stopping_ = self.early_stopping
615
+
616
+ # create validation data if needed
617
+ self._use_validation_data = self.validation_fraction is not None
618
+ if self.do_early_stopping_ and self._use_validation_data:
619
+ # stratify for classification
620
+ # instead of checking predict_proba, loss.n_classes >= 2 would also work
621
+ stratify = y if hasattr(self._loss, "predict_proba") else None
622
+
623
+ # Save the state of the RNG for the training and validation split.
624
+ # This is needed in order to have the same split when using
625
+ # warm starting.
626
+
627
+ if sample_weight is None:
628
+ X_train, X_val, y_train, y_val = train_test_split(
629
+ X,
630
+ y,
631
+ test_size=self.validation_fraction,
632
+ stratify=stratify,
633
+ random_state=self._random_seed,
634
+ )
635
+ sample_weight_train = sample_weight_val = None
636
+ else:
637
+ # TODO: incorporate sample_weight in sampling here, as well as
638
+ # stratify
639
+ (
640
+ X_train,
641
+ X_val,
642
+ y_train,
643
+ y_val,
644
+ sample_weight_train,
645
+ sample_weight_val,
646
+ ) = train_test_split(
647
+ X,
648
+ y,
649
+ sample_weight,
650
+ test_size=self.validation_fraction,
651
+ stratify=stratify,
652
+ random_state=self._random_seed,
653
+ )
654
+ else:
655
+ X_train, y_train, sample_weight_train = X, y, sample_weight
656
+ X_val = y_val = sample_weight_val = None
657
+
658
+ # Bin the data
659
+ # For ease of use of the API, the user-facing GBDT classes accept the
660
+ # parameter max_bins, which doesn't take into account the bin for
661
+ # missing values (which is always allocated). However, since max_bins
662
+ # isn't the true maximal number of bins, all other private classes
663
+ # (binmapper, histbuilder...) accept n_bins instead, which is the
664
+ # actual total number of bins. Everywhere in the code, the
665
+ # convention is that n_bins == max_bins + 1
666
+ n_bins = self.max_bins + 1 # + 1 for missing values
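+ # For example, with the default max_bins=255 this gives n_bins=256: indices
+ # 0..254 hold non-missing values and the last bin (index 255) is reserved for
+ # missing values (see _BinMapper.missing_values_bin_idx_).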
667
+ self._bin_mapper = _BinMapper(
668
+ n_bins=n_bins,
669
+ is_categorical=self._is_categorical_remapped,
670
+ known_categories=known_categories,
671
+ random_state=self._random_seed,
672
+ n_threads=n_threads,
673
+ )
674
+ X_binned_train = self._bin_data(X_train, is_training_data=True)
675
+ if X_val is not None:
676
+ X_binned_val = self._bin_data(X_val, is_training_data=False)
677
+ else:
678
+ X_binned_val = None
679
+
680
+ # Uses binned data to check for missing values
681
+ has_missing_values = (
682
+ (X_binned_train == self._bin_mapper.missing_values_bin_idx_)
683
+ .any(axis=0)
684
+ .astype(np.uint8)
685
+ )
686
+
687
+ if self.verbose:
688
+ print("Fitting gradient boosted rounds:")
689
+
690
+ n_samples = X_binned_train.shape[0]
691
+ scoring_is_predefined_string = self.scoring in _SCORERS
692
+ need_raw_predictions_val = X_binned_val is not None and (
693
+ scoring_is_predefined_string or self.scoring == "loss"
694
+ )
695
+ # First time calling fit, or no warm start
696
+ if not (self._is_fitted() and self.warm_start):
697
+ # Clear random state and score attributes
698
+ self._clear_state()
699
+
700
+ # initialize raw_predictions: those are the accumulated values
701
+ # predicted by the trees for the training data. raw_predictions has
702
+ # shape (n_samples, n_trees_per_iteration) where
703
+ # n_trees_per_iteration is n_classes in multiclass classification,
704
+ # else 1.
705
+ # self._baseline_prediction has shape (1, n_trees_per_iteration)
706
+ self._baseline_prediction = self._loss.fit_intercept_only(
707
+ y_true=y_train, sample_weight=sample_weight_train
708
+ ).reshape((1, -1))
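+ # For instance (illustrative): with the squared error loss the baseline is the
+ # (weighted) mean of y_train, and for binary log loss it is the log-odds of
+ # the positive class.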
709
+ raw_predictions = np.zeros(
710
+ shape=(n_samples, self.n_trees_per_iteration_),
711
+ dtype=self._baseline_prediction.dtype,
712
+ order="F",
713
+ )
714
+ raw_predictions += self._baseline_prediction
715
+
716
+ # predictors is a matrix (list of lists) of TreePredictor objects
717
+ # with shape (n_iter_, n_trees_per_iteration)
718
+ self._predictors = predictors = []
719
+
720
+ # Initialize structures and attributes related to early stopping
721
+ self._scorer = None # set if scoring != loss
722
+ raw_predictions_val = None # set if use val and scoring is a string
723
+ self.train_score_ = []
724
+ self.validation_score_ = []
725
+
726
+ if self.do_early_stopping_:
727
+ # populate train_score and validation_score with the
728
+ # predictions of the initial model (before the first tree)
729
+
730
+ # Create raw_predictions_val for storing the raw predictions of
731
+ # the validation data.
732
+ if need_raw_predictions_val:
733
+ raw_predictions_val = np.zeros(
734
+ shape=(X_binned_val.shape[0], self.n_trees_per_iteration_),
735
+ dtype=self._baseline_prediction.dtype,
736
+ order="F",
737
+ )
738
+
739
+ raw_predictions_val += self._baseline_prediction
740
+
741
+ if self.scoring == "loss":
742
+ # we're going to compute scoring w.r.t the loss. As losses
743
+ # take raw predictions as input (unlike the scorers), we
744
+ # can optimize a bit and avoid repeating computing the
745
+ # predictions of the previous trees. We'll reuse
746
+ # raw_predictions (as it's needed for training anyway) for
747
+ # evaluating the training loss.
748
+
749
+ self._check_early_stopping_loss(
750
+ raw_predictions=raw_predictions,
751
+ y_train=y_train,
752
+ sample_weight_train=sample_weight_train,
753
+ raw_predictions_val=raw_predictions_val,
754
+ y_val=y_val,
755
+ sample_weight_val=sample_weight_val,
756
+ n_threads=n_threads,
757
+ )
758
+ else:
759
+ self._scorer = check_scoring(self, self.scoring)
760
+ # _scorer is a callable with signature (est, X, y) and
761
+ # calls est.predict() or est.predict_proba() depending on
762
+ # its nature.
763
+ # Unfortunately, each call to _scorer() will compute
764
+ # the predictions of all the trees. So we use a subset of
765
+ # the training set to compute train scores.
766
+
767
+ # Compute the subsample set
768
+ (
769
+ X_binned_small_train,
770
+ y_small_train,
771
+ sample_weight_small_train,
772
+ indices_small_train,
773
+ ) = self._get_small_trainset(
774
+ X_binned_train,
775
+ y_train,
776
+ sample_weight_train,
777
+ self._random_seed,
778
+ )
779
+
780
+ # If the scorer is a predefined string, then we optimize
781
+ # the evaluation by re-using the incrementally updated raw
782
+ # predictions.
783
+ if scoring_is_predefined_string:
784
+ raw_predictions_small_train = raw_predictions[
785
+ indices_small_train
786
+ ]
787
+ else:
788
+ raw_predictions_small_train = None
789
+
790
+ self._check_early_stopping_scorer(
791
+ X_binned_small_train,
792
+ y_small_train,
793
+ sample_weight_small_train,
794
+ X_binned_val,
795
+ y_val,
796
+ sample_weight_val,
797
+ raw_predictions_small_train=raw_predictions_small_train,
798
+ raw_predictions_val=raw_predictions_val,
799
+ )
800
+ begin_at_stage = 0
801
+
802
+ # warm start: this is not the first time fit was called
803
+ else:
804
+ # Check that the maximum number of iterations is not smaller
805
+ # than the number of iterations from the previous fit
806
+ if self.max_iter < self.n_iter_:
807
+ raise ValueError(
808
+ "max_iter=%d must be larger than or equal to "
809
+ "n_iter_=%d when warm_start==True" % (self.max_iter, self.n_iter_)
810
+ )
811
+
812
+ # Convert array attributes to lists
813
+ self.train_score_ = self.train_score_.tolist()
814
+ self.validation_score_ = self.validation_score_.tolist()
815
+
816
+ # Compute raw predictions
817
+ raw_predictions = self._raw_predict(X_binned_train, n_threads=n_threads)
818
+ if self.do_early_stopping_ and need_raw_predictions_val:
819
+ raw_predictions_val = self._raw_predict(
820
+ X_binned_val, n_threads=n_threads
821
+ )
822
+ else:
823
+ raw_predictions_val = None
824
+
825
+ if self.do_early_stopping_ and self.scoring != "loss":
826
+ # Compute the subsample set
827
+ (
828
+ X_binned_small_train,
829
+ y_small_train,
830
+ sample_weight_small_train,
831
+ indices_small_train,
832
+ ) = self._get_small_trainset(
833
+ X_binned_train, y_train, sample_weight_train, self._random_seed
834
+ )
835
+
836
+ # Get the predictors from the previous fit
837
+ predictors = self._predictors
838
+
839
+ begin_at_stage = self.n_iter_
840
+
841
+ # initialize gradients and hessians (empty arrays).
842
+ # shape = (n_samples, n_trees_per_iteration).
843
+ gradient, hessian = self._loss.init_gradient_and_hessian(
844
+ n_samples=n_samples, dtype=G_H_DTYPE, order="F"
845
+ )
846
+
847
+ for iteration in range(begin_at_stage, self.max_iter):
848
+ if self.verbose:
849
+ iteration_start_time = time()
850
+ print(
851
+ "[{}/{}] ".format(iteration + 1, self.max_iter), end="", flush=True
852
+ )
853
+
854
+ # Update gradients and hessians, inplace
855
+ # Note that self._loss expects shape (n_samples,) for
856
+ # n_trees_per_iteration = 1 else shape (n_samples, n_trees_per_iteration).
857
+ if self._loss.constant_hessian:
858
+ self._loss.gradient(
859
+ y_true=y_train,
860
+ raw_prediction=raw_predictions,
861
+ sample_weight=sample_weight_train,
862
+ gradient_out=gradient,
863
+ n_threads=n_threads,
864
+ )
865
+ else:
866
+ self._loss.gradient_hessian(
867
+ y_true=y_train,
868
+ raw_prediction=raw_predictions,
869
+ sample_weight=sample_weight_train,
870
+ gradient_out=gradient,
871
+ hessian_out=hessian,
872
+ n_threads=n_threads,
873
+ )
874
+
875
+ # Append a list since there may be more than 1 predictor per iter
876
+ predictors.append([])
877
+
878
+ # 2-d views of shape (n_samples, n_trees_per_iteration_) or (n_samples, 1)
879
+ # on gradient and hessian to simplify the loop over n_trees_per_iteration_.
880
+ if gradient.ndim == 1:
881
+ g_view = gradient.reshape((-1, 1))
882
+ h_view = hessian.reshape((-1, 1))
883
+ else:
884
+ g_view = gradient
885
+ h_view = hessian
886
+
887
+ # Build `n_trees_per_iteration` trees.
888
+ for k in range(self.n_trees_per_iteration_):
889
+ grower = TreeGrower(
890
+ X_binned=X_binned_train,
891
+ gradients=g_view[:, k],
892
+ hessians=h_view[:, k],
893
+ n_bins=n_bins,
894
+ n_bins_non_missing=self._bin_mapper.n_bins_non_missing_,
895
+ has_missing_values=has_missing_values,
896
+ is_categorical=self._is_categorical_remapped,
897
+ monotonic_cst=monotonic_cst,
898
+ interaction_cst=interaction_cst,
899
+ max_leaf_nodes=self.max_leaf_nodes,
900
+ max_depth=self.max_depth,
901
+ min_samples_leaf=self.min_samples_leaf,
902
+ l2_regularization=self.l2_regularization,
903
+ feature_fraction_per_split=self.max_features,
904
+ rng=self._feature_subsample_rng,
905
+ shrinkage=self.learning_rate,
906
+ n_threads=n_threads,
907
+ )
908
+ grower.grow()
909
+
910
+ acc_apply_split_time += grower.total_apply_split_time
911
+ acc_find_split_time += grower.total_find_split_time
912
+ acc_compute_hist_time += grower.total_compute_hist_time
913
+
914
+ if not self._loss.differentiable:
915
+ _update_leaves_values(
916
+ loss=self._loss,
917
+ grower=grower,
918
+ y_true=y_train,
919
+ raw_prediction=raw_predictions[:, k],
920
+ sample_weight=sample_weight_train,
921
+ )
922
+
923
+ predictor = grower.make_predictor(
924
+ binning_thresholds=self._bin_mapper.bin_thresholds_
925
+ )
926
+ predictors[-1].append(predictor)
927
+
928
+ # Update raw_predictions with the predictions of the newly
929
+ # created tree.
930
+ tic_pred = time()
931
+ _update_raw_predictions(raw_predictions[:, k], grower, n_threads)
932
+ toc_pred = time()
933
+ acc_prediction_time += toc_pred - tic_pred
934
+
935
+ should_early_stop = False
936
+ if self.do_early_stopping_:
937
+ # Update raw_predictions_val with the newest tree(s)
938
+ if need_raw_predictions_val:
939
+ for k, pred in enumerate(self._predictors[-1]):
940
+ raw_predictions_val[:, k] += pred.predict_binned(
941
+ X_binned_val,
942
+ self._bin_mapper.missing_values_bin_idx_,
943
+ n_threads,
944
+ )
945
+
946
+ if self.scoring == "loss":
947
+ should_early_stop = self._check_early_stopping_loss(
948
+ raw_predictions=raw_predictions,
949
+ y_train=y_train,
950
+ sample_weight_train=sample_weight_train,
951
+ raw_predictions_val=raw_predictions_val,
952
+ y_val=y_val,
953
+ sample_weight_val=sample_weight_val,
954
+ n_threads=n_threads,
955
+ )
956
+
957
+ else:
958
+ # If the scorer is a predefined string, then we optimize the
959
+ # evaluation by re-using the incrementally computed raw predictions.
960
+ if scoring_is_predefined_string:
961
+ raw_predictions_small_train = raw_predictions[
962
+ indices_small_train
963
+ ]
964
+ else:
965
+ raw_predictions_small_train = None
966
+
967
+ should_early_stop = self._check_early_stopping_scorer(
968
+ X_binned_small_train,
969
+ y_small_train,
970
+ sample_weight_small_train,
971
+ X_binned_val,
972
+ y_val,
973
+ sample_weight_val,
974
+ raw_predictions_small_train=raw_predictions_small_train,
975
+ raw_predictions_val=raw_predictions_val,
976
+ )
977
+
978
+ if self.verbose:
979
+ self._print_iteration_stats(iteration_start_time)
980
+
981
+ # maybe we could also early stop if all the trees are stumps?
982
+ if should_early_stop:
983
+ break
984
+
985
+ if self.verbose:
986
+ duration = time() - fit_start_time
987
+ n_total_leaves = sum(
988
+ predictor.get_n_leaf_nodes()
989
+ for predictors_at_ith_iteration in self._predictors
990
+ for predictor in predictors_at_ith_iteration
991
+ )
992
+ n_predictors = sum(
993
+ len(predictors_at_ith_iteration)
994
+ for predictors_at_ith_iteration in self._predictors
995
+ )
996
+ print(
997
+ "Fit {} trees in {:.3f} s, ({} total leaves)".format(
998
+ n_predictors, duration, n_total_leaves
999
+ )
1000
+ )
1001
+ print(
1002
+ "{:<32} {:.3f}s".format(
1003
+ "Time spent computing histograms:", acc_compute_hist_time
1004
+ )
1005
+ )
1006
+ print(
1007
+ "{:<32} {:.3f}s".format(
1008
+ "Time spent finding best splits:", acc_find_split_time
1009
+ )
1010
+ )
1011
+ print(
1012
+ "{:<32} {:.3f}s".format(
1013
+ "Time spent applying splits:", acc_apply_split_time
1014
+ )
1015
+ )
1016
+ print(
1017
+ "{:<32} {:.3f}s".format("Time spent predicting:", acc_prediction_time)
1018
+ )
1019
+
1020
+ self.train_score_ = np.asarray(self.train_score_)
1021
+ self.validation_score_ = np.asarray(self.validation_score_)
1022
+ del self._in_fit # hard delete so we're sure it can't be used anymore
1023
+ return self
1024
+
1025
+ def _is_fitted(self):
1026
+ return len(getattr(self, "_predictors", [])) > 0
1027
+
1028
+ def _clear_state(self):
1029
+ """Clear the state of the gradient boosting model."""
1030
+ for var in ("train_score_", "validation_score_"):
1031
+ if hasattr(self, var):
1032
+ delattr(self, var)
1033
+
1034
+ def _get_small_trainset(self, X_binned_train, y_train, sample_weight_train, seed):
1035
+ """Compute the indices of the subsample set and return this set.
1036
+
1037
+ For efficiency, we need to subsample the training set to compute scores
1038
+ with scorers.
1039
+ """
1040
+ # TODO: incorporate sample_weights here in `resample`
1041
+ subsample_size = 10000
1042
+ if X_binned_train.shape[0] > subsample_size:
1043
+ indices = np.arange(X_binned_train.shape[0])
1044
+ stratify = y_train if is_classifier(self) else None
1045
+ indices = resample(
1046
+ indices,
1047
+ n_samples=subsample_size,
1048
+ replace=False,
1049
+ random_state=seed,
1050
+ stratify=stratify,
1051
+ )
1052
+ X_binned_small_train = X_binned_train[indices]
1053
+ y_small_train = y_train[indices]
1054
+ if sample_weight_train is not None:
1055
+ sample_weight_small_train = sample_weight_train[indices]
1056
+ else:
1057
+ sample_weight_small_train = None
1058
+ X_binned_small_train = np.ascontiguousarray(X_binned_small_train)
1059
+ return (
1060
+ X_binned_small_train,
1061
+ y_small_train,
1062
+ sample_weight_small_train,
1063
+ indices,
1064
+ )
1065
+ else:
1066
+ return X_binned_train, y_train, sample_weight_train, slice(None)
1067
+
1068
+ def _check_early_stopping_scorer(
1069
+ self,
1070
+ X_binned_small_train,
1071
+ y_small_train,
1072
+ sample_weight_small_train,
1073
+ X_binned_val,
1074
+ y_val,
1075
+ sample_weight_val,
1076
+ raw_predictions_small_train=None,
1077
+ raw_predictions_val=None,
1078
+ ):
1079
+ """Check if fitting should be early-stopped based on scorer.
1080
+
1081
+ Scores are computed on validation data or on training data.
1082
+ """
1083
+ if is_classifier(self):
1084
+ y_small_train = self.classes_[y_small_train.astype(int)]
1085
+
1086
+ self.train_score_.append(
1087
+ self._score_with_raw_predictions(
1088
+ X_binned_small_train,
1089
+ y_small_train,
1090
+ sample_weight_small_train,
1091
+ raw_predictions_small_train,
1092
+ )
1093
+ )
1094
+
1095
+ if self._use_validation_data:
1096
+ if is_classifier(self):
1097
+ y_val = self.classes_[y_val.astype(int)]
1098
+ self.validation_score_.append(
1099
+ self._score_with_raw_predictions(
1100
+ X_binned_val, y_val, sample_weight_val, raw_predictions_val
1101
+ )
1102
+ )
1103
+ return self._should_stop(self.validation_score_)
1104
+ else:
1105
+ return self._should_stop(self.train_score_)
1106
+
1107
+ def _score_with_raw_predictions(self, X, y, sample_weight, raw_predictions=None):
1108
+ if raw_predictions is None:
1109
+ patcher_raw_predict = nullcontext()
1110
+ else:
1111
+ patcher_raw_predict = _patch_raw_predict(self, raw_predictions)
1112
+
1113
+ with patcher_raw_predict:
1114
+ if sample_weight is None:
1115
+ return self._scorer(self, X, y)
1116
+ else:
1117
+ return self._scorer(self, X, y, sample_weight=sample_weight)
1118
+
1119
+ def _check_early_stopping_loss(
1120
+ self,
1121
+ raw_predictions,
1122
+ y_train,
1123
+ sample_weight_train,
1124
+ raw_predictions_val,
1125
+ y_val,
1126
+ sample_weight_val,
1127
+ n_threads=1,
1128
+ ):
1129
+ """Check if fitting should be early-stopped based on loss.
1130
+
1131
+ Scores are computed on validation data or on training data.
1132
+ """
1133
+ self.train_score_.append(
1134
+ -self._loss(
1135
+ y_true=y_train,
1136
+ raw_prediction=raw_predictions,
1137
+ sample_weight=sample_weight_train,
1138
+ n_threads=n_threads,
1139
+ )
1140
+ )
1141
+
1142
+ if self._use_validation_data:
1143
+ self.validation_score_.append(
1144
+ -self._loss(
1145
+ y_true=y_val,
1146
+ raw_prediction=raw_predictions_val,
1147
+ sample_weight=sample_weight_val,
1148
+ n_threads=n_threads,
1149
+ )
1150
+ )
1151
+ return self._should_stop(self.validation_score_)
1152
+ else:
1153
+ return self._should_stop(self.train_score_)
1154
+
1155
+ def _should_stop(self, scores):
1156
+ """
1157
+ Return True (do early stopping) if the last n scores aren't better
1158
+ than the (n-1)th-to-last score, up to some tolerance.
1159
+ """
1160
+ reference_position = self.n_iter_no_change + 1
1161
+ if len(scores) < reference_position:
1162
+ return False
1163
+
1164
+ # A higher score is always better. Higher tol means that it will be
1165
+ # harder for subsequent iterations to be considered an improvement upon
1166
+ # the reference score, and therefore it is more likely to early stop
1167
+ # because of the lack of significant improvement.
1168
+ reference_score = scores[-reference_position] + self.tol
1169
+ recent_scores = scores[-reference_position + 1 :]
1170
+ recent_improvements = [score > reference_score for score in recent_scores]
1171
+ return not any(recent_improvements)
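+ # Worked example (illustrative): with n_iter_no_change=3 and tol=0.0, scores
+ # [0.5, 0.6, 0.59, 0.58, 0.57] give reference_score=0.6 (the 4th-to-last) and
+ # none of the last 3 scores beat it, so this returns True (stop).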
1172
+
1173
+ def _bin_data(self, X, is_training_data):
1174
+ """Bin data X.
1175
+
1176
+ If is_training_data, then fit the _bin_mapper attribute.
1177
+ Else, the binned data is converted to a C-contiguous array.
1178
+ """
1179
+
1180
+ description = "training" if is_training_data else "validation"
1181
+ if self.verbose:
1182
+ print(
1183
+ "Binning {:.3f} GB of {} data: ".format(X.nbytes / 1e9, description),
1184
+ end="",
1185
+ flush=True,
1186
+ )
1187
+ tic = time()
1188
+ if is_training_data:
1189
+ X_binned = self._bin_mapper.fit_transform(X) # F-aligned array
1190
+ else:
1191
+ X_binned = self._bin_mapper.transform(X) # F-aligned array
1192
+ # We convert the array to C-contiguous since predicting is faster
1193
+ # with this layout (training is faster on F-arrays though)
1194
+ X_binned = np.ascontiguousarray(X_binned)
1195
+ toc = time()
1196
+ if self.verbose:
1197
+ duration = toc - tic
1198
+ print("{:.3f} s".format(duration))
1199
+
1200
+ return X_binned
1201
+
1202
+ def _print_iteration_stats(self, iteration_start_time):
1203
+ """Print info about the current fitting iteration."""
1204
+ log_msg = ""
1205
+
1206
+ predictors_of_ith_iteration = [
1207
+ predictors_list
1208
+ for predictors_list in self._predictors[-1]
1209
+ if predictors_list
1210
+ ]
1211
+ n_trees = len(predictors_of_ith_iteration)
1212
+ max_depth = max(
1213
+ predictor.get_max_depth() for predictor in predictors_of_ith_iteration
1214
+ )
1215
+ n_leaves = sum(
1216
+ predictor.get_n_leaf_nodes() for predictor in predictors_of_ith_iteration
1217
+ )
1218
+
1219
+ if n_trees == 1:
1220
+ log_msg += "{} tree, {} leaves, ".format(n_trees, n_leaves)
1221
+ else:
1222
+ log_msg += "{} trees, {} leaves ".format(n_trees, n_leaves)
1223
+ log_msg += "({} on avg), ".format(int(n_leaves / n_trees))
1224
+
1225
+ log_msg += "max depth = {}, ".format(max_depth)
1226
+
1227
+ if self.do_early_stopping_:
1228
+ if self.scoring == "loss":
1229
+ factor = -1 # score_ arrays contain the negative loss
1230
+ name = "loss"
1231
+ else:
1232
+ factor = 1
1233
+ name = "score"
1234
+ log_msg += "train {}: {:.5f}, ".format(name, factor * self.train_score_[-1])
1235
+ if self._use_validation_data:
1236
+ log_msg += "val {}: {:.5f}, ".format(
1237
+ name, factor * self.validation_score_[-1]
1238
+ )
1239
+
1240
+ iteration_time = time() - iteration_start_time
1241
+ log_msg += "in {:0.3f}s".format(iteration_time)
1242
+
1243
+ print(log_msg)
1244
+
1245
+ def _raw_predict(self, X, n_threads=None):
1246
+ """Return the sum of the leaves values over all predictors.
1247
+
1248
+ Parameters
1249
+ ----------
1250
+ X : array-like of shape (n_samples, n_features)
1251
+ The input samples.
1252
+ n_threads : int, default=None
1253
+ Number of OpenMP threads to use. `_openmp_effective_n_threads` is called
1254
+ to determine the effective number of threads to use, which takes cgroups CPU
1255
+ quotas into account. See the docstring of `_openmp_effective_n_threads`
1256
+ for details.
1257
+
1258
+ Returns
1259
+ -------
1260
+ raw_predictions : array, shape (n_samples, n_trees_per_iteration)
1261
+ The raw predicted values.
1262
+ """
1263
+ check_is_fitted(self)
1264
+ is_binned = getattr(self, "_in_fit", False)
1265
+ if not is_binned:
1266
+ X = self._preprocess_X(X, reset=False)
1267
+
1268
+ n_samples = X.shape[0]
1269
+ raw_predictions = np.zeros(
1270
+ shape=(n_samples, self.n_trees_per_iteration_),
1271
+ dtype=self._baseline_prediction.dtype,
1272
+ order="F",
1273
+ )
1274
+ raw_predictions += self._baseline_prediction
1275
+
1276
+ # We intentionally decouple the number of threads used at prediction
1277
+ # time from the number of threads used at fit time because the model
1278
+ # can be deployed on a different machine for prediction purposes.
1279
+ n_threads = _openmp_effective_n_threads(n_threads)
1280
+ self._predict_iterations(
1281
+ X, self._predictors, raw_predictions, is_binned, n_threads
1282
+ )
1283
+ return raw_predictions
1284
+
1285
+ def _predict_iterations(self, X, predictors, raw_predictions, is_binned, n_threads):
1286
+ """Add the predictions of the predictors to raw_predictions."""
1287
+ if not is_binned:
1288
+ (
1289
+ known_cat_bitsets,
1290
+ f_idx_map,
1291
+ ) = self._bin_mapper.make_known_categories_bitsets()
1292
+
1293
+ for predictors_of_ith_iteration in predictors:
1294
+ for k, predictor in enumerate(predictors_of_ith_iteration):
1295
+ if is_binned:
1296
+ predict = partial(
1297
+ predictor.predict_binned,
1298
+ missing_values_bin_idx=self._bin_mapper.missing_values_bin_idx_,
1299
+ n_threads=n_threads,
1300
+ )
1301
+ else:
1302
+ predict = partial(
1303
+ predictor.predict,
1304
+ known_cat_bitsets=known_cat_bitsets,
1305
+ f_idx_map=f_idx_map,
1306
+ n_threads=n_threads,
1307
+ )
1308
+ raw_predictions[:, k] += predict(X)
1309
+
1310
+ def _staged_raw_predict(self, X):
1311
+ """Compute raw predictions of ``X`` for each iteration.
1312
+
1313
+ This method allows monitoring (i.e. determining the error on a test set)
1314
+ after each stage.
1315
+
1316
+ Parameters
1317
+ ----------
1318
+ X : array-like of shape (n_samples, n_features)
1319
+ The input samples.
1320
+
1321
+ Yields
1322
+ ------
1323
+ raw_predictions : generator of ndarray of shape \
1324
+ (n_samples, n_trees_per_iteration)
1325
+ The raw predictions of the input samples. The order of the
1326
+ classes corresponds to that in the attribute :term:`classes_`.
1327
+ """
1328
+ check_is_fitted(self)
1329
+ X = self._preprocess_X(X, reset=False)
1330
+ if X.shape[1] != self._n_features:
1331
+ raise ValueError(
1332
+ "X has {} features but this estimator was trained with "
1333
+ "{} features.".format(X.shape[1], self._n_features)
1334
+ )
1335
+ n_samples = X.shape[0]
1336
+ raw_predictions = np.zeros(
1337
+ shape=(n_samples, self.n_trees_per_iteration_),
1338
+ dtype=self._baseline_prediction.dtype,
1339
+ order="F",
1340
+ )
1341
+ raw_predictions += self._baseline_prediction
1342
+
1343
+ # We intentionally decouple the number of threads used at prediction
1344
+ # time from the number of threads used at fit time because the model
1345
+ # can be deployed on a different machine for prediction purposes.
1346
+ n_threads = _openmp_effective_n_threads()
1347
+ for iteration in range(len(self._predictors)):
1348
+ self._predict_iterations(
1349
+ X,
1350
+ self._predictors[iteration : iteration + 1],
1351
+ raw_predictions,
1352
+ is_binned=False,
1353
+ n_threads=n_threads,
1354
+ )
1355
+ yield raw_predictions.copy()
1356
+
1357
+ def _compute_partial_dependence_recursion(self, grid, target_features):
1358
+ """Fast partial dependence computation.
1359
+
1360
+ Parameters
1361
+ ----------
1362
+ grid : ndarray, shape (n_samples, n_target_features)
1363
+ The grid points on which the partial dependence should be
1364
+ evaluated.
1365
+ target_features : ndarray, shape (n_target_features)
1366
+ The set of target features for which the partial dependence
1367
+ should be evaluated.
1368
+
1369
+ Returns
1370
+ -------
1371
+ averaged_predictions : ndarray, shape \
1372
+ (n_trees_per_iteration, n_samples)
1373
+ The value of the partial dependence function on each grid point.
1374
+ """
1375
+
1376
+ if getattr(self, "_fitted_with_sw", False):
1377
+ raise NotImplementedError(
1378
+ "{} does not support partial dependence "
1379
+ "plots with the 'recursion' method when "
1380
+ "sample weights were given during fit "
1381
+ "time.".format(self.__class__.__name__)
1382
+ )
1383
+
1384
+ grid = np.asarray(grid, dtype=X_DTYPE, order="C")
1385
+ averaged_predictions = np.zeros(
1386
+ (self.n_trees_per_iteration_, grid.shape[0]), dtype=Y_DTYPE
1387
+ )
1388
+
1389
+ for predictors_of_ith_iteration in self._predictors:
1390
+ for k, predictor in enumerate(predictors_of_ith_iteration):
1391
+ predictor.compute_partial_dependence(
1392
+ grid, target_features, averaged_predictions[k]
1393
+ )
1394
+ # Note that the learning rate is already accounted for in the leaves
1395
+ # values.
1396
+
1397
+ return averaged_predictions
1398
+
1399
+ def _more_tags(self):
1400
+ return {"allow_nan": True}
1401
+
1402
+ @abstractmethod
1403
+ def _get_loss(self, sample_weight):
1404
+ pass
1405
+
1406
+ @abstractmethod
1407
+ def _encode_y(self, y=None):
1408
+ pass
1409
+
1410
+ @property
1411
+ def n_iter_(self):
1412
+ """Number of iterations of the boosting process."""
1413
+ check_is_fitted(self)
1414
+ return len(self._predictors)
1415
+
1416
+
1417
+ class HistGradientBoostingRegressor(RegressorMixin, BaseHistGradientBoosting):
1418
+ """Histogram-based Gradient Boosting Regression Tree.
1419
+
1420
+ This estimator is much faster than
1421
+ :class:`GradientBoostingRegressor<sklearn.ensemble.GradientBoostingRegressor>`
1422
+ for big datasets (n_samples >= 10 000).
1423
+
1424
+ This estimator has native support for missing values (NaNs). During
1425
+ training, the tree grower learns at each split point whether samples
1426
+ with missing values should go to the left or right child, based on the
1427
+ potential gain. When predicting, samples with missing values are
1428
+ assigned to the left or right child accordingly. If no missing values
1429
+ were encountered for a given feature during training, then samples with
1430
+ missing values are mapped to whichever child has the most samples.
1431
+
1432
+ This implementation is inspired by
1433
+ `LightGBM <https://github.com/Microsoft/LightGBM>`_.
1434
+
1435
+ Read more in the :ref:`User Guide <histogram_based_gradient_boosting>`.
1436
+
1437
+ .. versionadded:: 0.21
1438
+
1439
+ Parameters
1440
+ ----------
1441
+ loss : {'squared_error', 'absolute_error', 'gamma', 'poisson', 'quantile'}, \
1442
+ default='squared_error'
1443
+ The loss function to use in the boosting process. Note that the
1444
+ "squared error", "gamma" and "poisson" losses actually implement
1445
+ "half least squares loss", "half gamma deviance" and "half poisson
1446
+ deviance" to simplify the computation of the gradient. Furthermore,
1447
+ "gamma" and "poisson" losses internally use a log-link, "gamma"
1448
+ requires ``y > 0`` and "poisson" requires ``y >= 0``.
1449
+ "quantile" uses the pinball loss.
1450
+
1451
+ .. versionchanged:: 0.23
1452
+ Added option 'poisson'.
1453
+
1454
+ .. versionchanged:: 1.1
1455
+ Added option 'quantile'.
1456
+
1457
+ .. versionchanged:: 1.3
1458
+ Added option 'gamma'.
1459
+
1460
+ quantile : float, default=None
1461
+ If loss is "quantile", this parameter specifies which quantile to be estimated
1462
+ and must be between 0 and 1.
1463
+ learning_rate : float, default=0.1
1464
+ The learning rate, also known as *shrinkage*. This is used as a
1465
+ multiplicative factor for the leaves values. Use ``1`` for no
1466
+ shrinkage.
1467
+ max_iter : int, default=100
1468
+ The maximum number of iterations of the boosting process, i.e. the
1469
+ maximum number of trees.
1470
+ max_leaf_nodes : int or None, default=31
1471
+ The maximum number of leaves for each tree. Must be strictly greater
1472
+ than 1. If None, there is no maximum limit.
1473
+ max_depth : int or None, default=None
1474
+ The maximum depth of each tree. The depth of a tree is the number of
1475
+ edges to go from the root to the deepest leaf.
1476
+ Depth isn't constrained by default.
1477
+ min_samples_leaf : int, default=20
1478
+ The minimum number of samples per leaf. For small datasets with less
1479
+ than a few hundred samples, it is recommended to lower this value
1480
+ since only very shallow trees would be built.
1481
+ l2_regularization : float, default=0
1482
+ The L2 regularization parameter. Use ``0`` for no regularization (default).
1483
+ max_features : float, default=1.0
1484
+ Proportion of randomly chosen features in each and every node split.
1485
+ This is a form of regularization, smaller values make the trees weaker
1486
+ learners and might prevent overfitting.
1487
+ If interaction constraints from `interaction_cst` are present, only allowed
1488
+ features are taken into account for the subsampling.
1489
+
1490
+ .. versionadded:: 1.4
1491
+
1492
+ max_bins : int, default=255
1493
+ The maximum number of bins to use for non-missing values. Before
1494
+ training, each feature of the input array `X` is binned into
1495
+ integer-valued bins, which allows for a much faster training stage.
1496
+ Features with a small number of unique values may use less than
1497
+ ``max_bins`` bins. In addition to the ``max_bins`` bins, one more bin
1498
+ is always reserved for missing values. Must be no larger than 255.
1499
+ categorical_features : array-like of {bool, int, str} of shape (n_features) \
1500
+ or shape (n_categorical_features,), default=None
1501
+ Indicates the categorical features.
1502
+
1503
+ - None : no feature will be considered categorical.
1504
+ - boolean array-like : boolean mask indicating categorical features.
1505
+ - integer array-like : integer indices indicating categorical
1506
+ features.
1507
+ - str array-like: names of categorical features (assuming the training
1508
+ data has feature names).
1509
+ - `"from_dtype"`: dataframe columns with dtype "category" are
1510
+ considered to be categorical features. The input must be an object
1511
+ exposing a ``__dataframe__`` method such as pandas or polars
1512
+ DataFrames to use this feature.
1513
+
1514
+ For each categorical feature, there must be at most `max_bins` unique
1515
+ categories. Negative values for categorical features encoded as numeric
1516
+ dtypes are treated as missing values. All categorical values are
1517
+ converted to floating point numbers. This means that categorical values
1518
+ of 1.0 and 1 are treated as the same category.
1519
+
1520
+ Read more in the :ref:`User Guide <categorical_support_gbdt>`.
1521
+
1522
+ .. versionadded:: 0.24
1523
+
1524
+ .. versionchanged:: 1.2
1525
+ Added support for feature names.
1526
+
1527
+ .. versionchanged:: 1.4
1528
+ Added `"from_dtype"` option. The default will change to `"from_dtype"` in
1529
+ v1.6.
1530
+
1531
+ monotonic_cst : array-like of int of shape (n_features) or dict, default=None
1532
+ Monotonic constraint to enforce on each feature are specified using the
1533
+ following integer values:
1534
+
1535
+ - 1: monotonic increase
1536
+ - 0: no constraint
1537
+ - -1: monotonic decrease
1538
+
1539
+ If a dict with str keys, map feature to monotonic constraints by name.
1540
+ If an array, the features are mapped to constraints by position. See
1541
+ :ref:`monotonic_cst_features_names` for a usage example.
1542
+
1543
+ Read more in the :ref:`User Guide <monotonic_cst_gbdt>`.
1544
+
1545
+ .. versionadded:: 0.23
1546
+
1547
+ .. versionchanged:: 1.2
1548
+ Accept dict of constraints with feature names as keys.
1549
+
1550
+ interaction_cst : {"pairwise", "no_interactions"} or sequence of lists/tuples/sets \
1551
+ of int, default=None
1552
+ Specify interaction constraints, the sets of features which can
1553
+ interact with each other in child node splits.
1554
+
1555
+ Each item specifies the set of feature indices that are allowed
1556
+ to interact with each other. If there are more features than
1557
+ specified in these constraints, they are treated as if they were
1558
+ specified as an additional set.
1559
+
1560
+ The strings "pairwise" and "no_interactions" are shorthands for
1561
+ allowing only pairwise or no interactions, respectively.
1562
+
1563
+ For instance, with 5 features in total, `interaction_cst=[{0, 1}]`
1564
+ is equivalent to `interaction_cst=[{0, 1}, {2, 3, 4}]`,
1565
+ and specifies that each branch of a tree will either only split
1566
+ on features 0 and 1 or only split on features 2, 3 and 4.
1567
+
1568
+ .. versionadded:: 1.2
1569
+
1570
+ warm_start : bool, default=False
1571
+ When set to ``True``, reuse the solution of the previous call to fit
1572
+ and add more estimators to the ensemble. For results to be valid, the
1573
+ estimator should be re-trained on the same data only.
1574
+ See :term:`the Glossary <warm_start>`.
1575
+ early_stopping : 'auto' or bool, default='auto'
1576
+ If 'auto', early stopping is enabled if the sample size is larger than
1577
+ 10000. If True, early stopping is enabled, otherwise early stopping is
1578
+ disabled.
1579
+
1580
+ .. versionadded:: 0.23
1581
+
1582
+ scoring : str or callable or None, default='loss'
1583
+ Scoring parameter to use for early stopping. It can be a single
1584
+ string (see :ref:`scoring_parameter`) or a callable (see
1585
+ :ref:`scoring`). If None, the estimator's default scorer is used. If
1586
+ ``scoring='loss'``, early stopping is checked w.r.t the loss value.
1587
+ Only used if early stopping is performed.
1588
+ validation_fraction : int or float or None, default=0.1
1589
+ Proportion (or absolute size) of training data to set aside as
1590
+ validation data for early stopping. If None, early stopping is done on
1591
+ the training data. Only used if early stopping is performed.
1592
+ n_iter_no_change : int, default=10
1593
+ Used to determine when to "early stop". The fitting process is
1594
+ stopped when none of the last ``n_iter_no_change`` scores are better
1595
+ than the ``n_iter_no_change - 1`` -th-to-last one, up to some
1596
+ tolerance. Only used if early stopping is performed.
1597
+ tol : float, default=1e-7
1598
+ The absolute tolerance to use when comparing scores during early
1599
+ stopping. The higher the tolerance, the more likely we are to early
1600
+ stop: higher tolerance means that it will be harder for subsequent
1601
+ iterations to be considered an improvement upon the reference score.
1602
+ verbose : int, default=0
1603
+ The verbosity level. If not zero, print some information about the
1604
+ fitting process.
1605
+ random_state : int, RandomState instance or None, default=None
1606
+ Pseudo-random number generator to control the subsampling in the
1607
+ binning process, and the train/validation data split if early stopping
1608
+ is enabled.
1609
+ Pass an int for reproducible output across multiple function calls.
1610
+ See :term:`Glossary <random_state>`.
1611
+
1612
+ Attributes
1613
+ ----------
1614
+ do_early_stopping_ : bool
1615
+ Indicates whether early stopping is used during training.
1616
+ n_iter_ : int
1617
+ The number of iterations as selected by early stopping, depending on
1618
+ the `early_stopping` parameter. Otherwise it corresponds to max_iter.
1619
+ n_trees_per_iteration_ : int
1620
+ The number of trees that are built at each iteration. For regressors,
1621
+ this is always 1.
1622
+ train_score_ : ndarray, shape (n_iter_+1,)
1623
+ The scores at each iteration on the training data. The first entry
1624
+ is the score of the ensemble before the first iteration. Scores are
1625
+ computed according to the ``scoring`` parameter. If ``scoring`` is
1626
+ not 'loss', scores are computed on a subset of at most 10 000
1627
+ samples. Empty if no early stopping.
1628
+ validation_score_ : ndarray, shape (n_iter_+1,)
1629
+ The scores at each iteration on the held-out validation data. The
1630
+ first entry is the score of the ensemble before the first iteration.
1631
+ Scores are computed according to the ``scoring`` parameter. Empty if
1632
+ no early stopping or if ``validation_fraction`` is None.
1633
+ is_categorical_ : ndarray, shape (n_features, ) or None
1634
+ Boolean mask for the categorical features. ``None`` if there are no
1635
+ categorical features.
1636
+ n_features_in_ : int
1637
+ Number of features seen during :term:`fit`.
1638
+
1639
+ .. versionadded:: 0.24
1640
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
1641
+ Names of features seen during :term:`fit`. Defined only when `X`
1642
+ has feature names that are all strings.
1643
+
1644
+ .. versionadded:: 1.0
1645
+
1646
+ See Also
1647
+ --------
1648
+ GradientBoostingRegressor : Exact gradient boosting method that does not
1649
+ scale as well on datasets with a large number of samples.
1650
+ sklearn.tree.DecisionTreeRegressor : A decision tree regressor.
1651
+ RandomForestRegressor : A meta-estimator that fits a number of decision
1652
+ tree regressors on various sub-samples of the dataset and uses
1653
+ averaging to improve the statistical performance and control
1654
+ over-fitting.
1655
+ AdaBoostRegressor : A meta-estimator that begins by fitting a regressor
1656
+ on the original dataset and then fits additional copies of the
1657
+ regressor on the same dataset but where the weights of instances are
1658
+ adjusted according to the error of the current prediction. As such,
1659
+ subsequent regressors focus more on difficult cases.
1660
+
1661
+ Examples
1662
+ --------
1663
+ >>> from sklearn.ensemble import HistGradientBoostingRegressor
1664
+ >>> from sklearn.datasets import load_diabetes
1665
+ >>> X, y = load_diabetes(return_X_y=True)
1666
+ >>> est = HistGradientBoostingRegressor().fit(X, y)
1667
+ >>> est.score(X, y)
1668
+ 0.92...
1669
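+
+ The `quantile` parameter is only used with ``loss="quantile"``. An
+ illustrative sketch of such a model:
+
+ >>> est_q = HistGradientBoostingRegressor(loss="quantile", quantile=0.9)
+ >>> est_q = est_q.fit(X, y)
+ >>> y_q = est_q.predict(X[:5])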
+ """
1670
+
1671
+ _parameter_constraints: dict = {
1672
+ **BaseHistGradientBoosting._parameter_constraints,
1673
+ "loss": [
1674
+ StrOptions(
1675
+ {
1676
+ "squared_error",
1677
+ "absolute_error",
1678
+ "poisson",
1679
+ "gamma",
1680
+ "quantile",
1681
+ }
1682
+ ),
1683
+ BaseLoss,
1684
+ ],
1685
+ "quantile": [Interval(Real, 0, 1, closed="both"), None],
1686
+ }
1687
+
1688
+ def __init__(
1689
+ self,
1690
+ loss="squared_error",
1691
+ *,
1692
+ quantile=None,
1693
+ learning_rate=0.1,
1694
+ max_iter=100,
1695
+ max_leaf_nodes=31,
1696
+ max_depth=None,
1697
+ min_samples_leaf=20,
1698
+ l2_regularization=0.0,
1699
+ max_features=1.0,
1700
+ max_bins=255,
1701
+ categorical_features="warn",
1702
+ monotonic_cst=None,
1703
+ interaction_cst=None,
1704
+ warm_start=False,
1705
+ early_stopping="auto",
1706
+ scoring="loss",
1707
+ validation_fraction=0.1,
1708
+ n_iter_no_change=10,
1709
+ tol=1e-7,
1710
+ verbose=0,
1711
+ random_state=None,
1712
+ ):
1713
+ super(HistGradientBoostingRegressor, self).__init__(
1714
+ loss=loss,
1715
+ learning_rate=learning_rate,
1716
+ max_iter=max_iter,
1717
+ max_leaf_nodes=max_leaf_nodes,
1718
+ max_depth=max_depth,
1719
+ min_samples_leaf=min_samples_leaf,
1720
+ l2_regularization=l2_regularization,
1721
+ max_features=max_features,
1722
+ max_bins=max_bins,
1723
+ monotonic_cst=monotonic_cst,
1724
+ interaction_cst=interaction_cst,
1725
+ categorical_features=categorical_features,
1726
+ early_stopping=early_stopping,
1727
+ warm_start=warm_start,
1728
+ scoring=scoring,
1729
+ validation_fraction=validation_fraction,
1730
+ n_iter_no_change=n_iter_no_change,
1731
+ tol=tol,
1732
+ verbose=verbose,
1733
+ random_state=random_state,
1734
+ )
1735
+ self.quantile = quantile
1736
+
1737
+ def predict(self, X):
1738
+ """Predict values for X.
1739
+
1740
+ Parameters
1741
+ ----------
1742
+ X : array-like, shape (n_samples, n_features)
1743
+ The input samples.
1744
+
1745
+ Returns
1746
+ -------
1747
+ y : ndarray, shape (n_samples,)
1748
+ The predicted values.
1749
+ """
1750
+ check_is_fitted(self)
1751
+ # Return inverse link of raw predictions after converting
1752
+ # shape (n_samples, 1) to (n_samples,)
1753
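+ # The inverse link depends on the loss: identity for squared_error,
+ # absolute_error and quantile, exp for the log-link poisson and gamma
+ # losses, so predictions come back on the original target scale.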
+ return self._loss.link.inverse(self._raw_predict(X).ravel())
1754
+
1755
+ def staged_predict(self, X):
1756
+ """Predict regression target for each iteration.
1757
+
1758
+ This method allows monitoring (i.e. determine error on testing set)
1759
+ after each stage.
1760
+
1761
+ .. versionadded:: 0.24
1762
+
1763
+ Parameters
1764
+ ----------
1765
+ X : array-like of shape (n_samples, n_features)
1766
+ The input samples.
1767
+
1768
+ Yields
1769
+ ------
1770
+ y : generator of ndarray of shape (n_samples,)
1771
+ The predicted values of the input samples, for each iteration.
1772
+ """
1773
+ for raw_predictions in self._staged_raw_predict(X):
1774
+ yield self._loss.link.inverse(raw_predictions.ravel())
1775
+
1776
+ def _encode_y(self, y):
1777
+ # Just convert y to the expected dtype
1778
+ self.n_trees_per_iteration_ = 1
1779
+ y = y.astype(Y_DTYPE, copy=False)
1780
+ if self.loss == "gamma":
1781
+ # Ensure y > 0
1782
+ if not np.all(y > 0):
1783
+ raise ValueError("loss='gamma' requires strictly positive y.")
1784
+ elif self.loss == "poisson":
1785
+ # Ensure y >= 0 and sum(y) > 0
1786
+ if not (np.all(y >= 0) and np.sum(y) > 0):
1787
+ raise ValueError(
1788
+ "loss='poisson' requires non-negative y and sum(y) > 0."
1789
+ )
1790
+ return y
1791
+
1792
+ def _get_loss(self, sample_weight):
1793
+ if self.loss == "quantile":
1794
+ return _LOSSES[self.loss](
1795
+ sample_weight=sample_weight, quantile=self.quantile
1796
+ )
1797
+ else:
1798
+ return _LOSSES[self.loss](sample_weight=sample_weight)
1799
+
1800
+
1801
+ class HistGradientBoostingClassifier(ClassifierMixin, BaseHistGradientBoosting):
1802
+ """Histogram-based Gradient Boosting Classification Tree.
1803
+
1804
+ This estimator is much faster than
1805
+ :class:`GradientBoostingClassifier<sklearn.ensemble.GradientBoostingClassifier>`
1806
+ for big datasets (n_samples >= 10 000).
1807
+
1808
+ This estimator has native support for missing values (NaNs). During
1809
+ training, the tree grower learns at each split point whether samples
1810
+ with missing values should go to the left or right child, based on the
1811
+ potential gain. When predicting, samples with missing values are
1812
+ assigned to the left or right child accordingly. If no missing values
1813
+ were encountered for a given feature during training, then samples with
1814
+ missing values are mapped to whichever child has the most samples.
1815
+
1816
+ This implementation is inspired by
1817
+ `LightGBM <https://github.com/Microsoft/LightGBM>`_.
1818
+
1819
+ Read more in the :ref:`User Guide <histogram_based_gradient_boosting>`.
1820
+
1821
+ .. versionadded:: 0.21
1822
+
1823
+ Parameters
1824
+ ----------
1825
+ loss : {'log_loss'}, default='log_loss'
1826
+ The loss function to use in the boosting process.
1827
+
1828
+ For binary classification problems, 'log_loss' is also known as logistic loss,
1829
+ binomial deviance or binary crossentropy. Internally, the model fits one tree
1830
+ per boosting iteration and uses the logistic sigmoid function (expit) as
1831
+ inverse link function to compute the predicted positive class probability.
1832
+
1833
+ For multiclass classification problems, 'log_loss' is also known as multinomial
1834
+ deviance or categorical crossentropy. Internally, the model fits one tree per
1835
+ boosting iteration and per class and uses the softmax function as inverse link
1836
+ function to compute the predicted probabilities of the classes.
1837
+
1838
+ learning_rate : float, default=0.1
1839
+ The learning rate, also known as *shrinkage*. This is used as a
1840
+ multiplicative factor for the leaves values. Use ``1`` for no
1841
+ shrinkage.
1842
+ max_iter : int, default=100
1843
+ The maximum number of iterations of the boosting process, i.e. the
1844
+ maximum number of trees for binary classification. For multiclass
1845
+ classification, `n_classes` trees per iteration are built.
1846
+ max_leaf_nodes : int or None, default=31
1847
+ The maximum number of leaves for each tree. Must be strictly greater
1848
+ than 1. If None, there is no maximum limit.
1849
+ max_depth : int or None, default=None
1850
+ The maximum depth of each tree. The depth of a tree is the number of
1851
+ edges to go from the root to the deepest leaf.
1852
+ Depth isn't constrained by default.
1853
+ min_samples_leaf : int, default=20
1854
+ The minimum number of samples per leaf. For small datasets with fewer
1855
+ than a few hundred samples, it is recommended to lower this value
1856
+ since only very shallow trees would be built.
1857
+ l2_regularization : float, default=0
1858
+ The L2 regularization parameter. Use ``0`` for no regularization (default).
1859
+ max_features : float, default=1.0
1860
+ Proportion of randomly chosen features in each and every node split.
1861
+ This is a form of regularization, smaller values make the trees weaker
1862
+ learners and might prevent overfitting.
1863
+ If interaction constraints from `interaction_cst` are present, only allowed
1864
+ features are taken into account for the subsampling.
1865
+
1866
+ .. versionadded:: 1.4
1867
+
1868
+ max_bins : int, default=255
1869
+ The maximum number of bins to use for non-missing values. Before
1870
+ training, each feature of the input array `X` is binned into
1871
+ integer-valued bins, which allows for a much faster training stage.
1872
+ Features with a small number of unique values may use less than
1873
+ ``max_bins`` bins. In addition to the ``max_bins`` bins, one more bin
1874
+ is always reserved for missing values. Must be no larger than 255.
1875
+ categorical_features : array-like of {bool, int, str} of shape (n_features) \
1876
+ or shape (n_categorical_features,), default=None
1877
+ Indicates the categorical features.
1878
+
1879
+ - None : no feature will be considered categorical.
1880
+ - boolean array-like : boolean mask indicating categorical features.
1881
+ - integer array-like : integer indices indicating categorical
1882
+ features.
1883
+ - str array-like: names of categorical features (assuming the training
1884
+ data has feature names).
1885
+ - `"from_dtype"`: dataframe columns with dtype "category" are
1886
+ considered to be categorical features. The input must be an object
1887
+ exposing a ``__dataframe__`` method such as pandas or polars
1888
+ DataFrames to use this feature.
1889
+
1890
+ For each categorical feature, there must be at most `max_bins` unique
1891
+ categories. Negative values for categorical features encoded as numeric
1892
+ dtypes are treated as missing values. All categorical values are
1893
+ converted to floating point numbers. This means that categorical values
1894
+ of 1.0 and 1 are treated as the same category.
1895
+
1896
+ Read more in the :ref:`User Guide <categorical_support_gbdt>`.
1897
+
1898
+ .. versionadded:: 0.24
1899
+
1900
+ .. versionchanged:: 1.2
1901
+ Added support for feature names.
1902
+
1903
+ .. versionchanged:: 1.4
1904
+ Added `"from_dtype"` option. The default will change to `"from_dtype"` in
1905
+ v1.6.
1906
+
1907
+ monotonic_cst : array-like of int of shape (n_features) or dict, default=None
1908
+ The monotonic constraint to enforce on each feature is specified using the
1909
+ following integer values:
1910
+
1911
+ - 1: monotonic increase
1912
+ - 0: no constraint
1913
+ - -1: monotonic decrease
1914
+
1915
+ If a dict with str keys, map feature to monotonic constraints by name.
1916
+ If an array, the features are mapped to constraints by position. See
1917
+ :ref:`monotonic_cst_features_names` for a usage example.
1918
+
1919
+ The constraints are only valid for binary classification and hold
1920
+ over the probability of the positive class.
1921
+ Read more in the :ref:`User Guide <monotonic_cst_gbdt>`.
1922
+
1923
+ .. versionadded:: 0.23
1924
+
1925
+ .. versionchanged:: 1.2
1926
+ Accept dict of constraints with feature names as keys.
1927
+
1928
+ interaction_cst : {"pairwise", "no_interactions"} or sequence of lists/tuples/sets \
1929
+ of int, default=None
1930
+ Specify interaction constraints, the sets of features which can
1931
+ interact with each other in child node splits.
1932
+
1933
+ Each item specifies the set of feature indices that are allowed
1934
+ to interact with each other. If there are more features than
1935
+ specified in these constraints, they are treated as if they were
1936
+ specified as an additional set.
1937
+
1938
+ The strings "pairwise" and "no_interactions" are shorthands for
1939
+ allowing only pairwise or no interactions, respectively.
1940
+
1941
+ For instance, with 5 features in total, `interaction_cst=[{0, 1}]`
1942
+ is equivalent to `interaction_cst=[{0, 1}, {2, 3, 4}]`,
1943
+ and specifies that each branch of a tree will either only split
1944
+ on features 0 and 1 or only split on features 2, 3 and 4.
1945
+
1946
+ .. versionadded:: 1.2
1947
+
1948
+ warm_start : bool, default=False
1949
+ When set to ``True``, reuse the solution of the previous call to fit
1950
+ and add more estimators to the ensemble. For results to be valid, the
1951
+ estimator should be re-trained on the same data only.
1952
+ See :term:`the Glossary <warm_start>`.
1953
+ early_stopping : 'auto' or bool, default='auto'
1954
+ If 'auto', early stopping is enabled if the sample size is larger than
1955
+ 10000. If True, early stopping is enabled, otherwise early stopping is
1956
+ disabled.
1957
+
1958
+ .. versionadded:: 0.23
1959
+
1960
+ scoring : str or callable or None, default='loss'
1961
+ Scoring parameter to use for early stopping. It can be a single
1962
+ string (see :ref:`scoring_parameter`) or a callable (see
1963
+ :ref:`scoring`). If None, the estimator's default scorer
1964
+ is used. If ``scoring='loss'``, early stopping is checked
1965
+ w.r.t the loss value. Only used if early stopping is performed.
1966
+ validation_fraction : int or float or None, default=0.1
1967
+ Proportion (or absolute size) of training data to set aside as
1968
+ validation data for early stopping. If None, early stopping is done on
1969
+ the training data. Only used if early stopping is performed.
1970
+ n_iter_no_change : int, default=10
1971
+ Used to determine when to "early stop". The fitting process is
1972
+ stopped when none of the last ``n_iter_no_change`` scores are better
1973
+ than the ``n_iter_no_change - 1`` -th-to-last one, up to some
1974
+ tolerance. Only used if early stopping is performed.
1975
+ tol : float, default=1e-7
1976
+ The absolute tolerance to use when comparing scores. The higher the
1977
+ tolerance, the more likely we are to early stop: higher tolerance
1978
+ means that it will be harder for subsequent iterations to be
1979
+ considered an improvement upon the reference score.
1980
+ verbose : int, default=0
1981
+ The verbosity level. If not zero, print some information about the
1982
+ fitting process.
1983
+ random_state : int, RandomState instance or None, default=None
1984
+ Pseudo-random number generator to control the subsampling in the
1985
+ binning process, and the train/validation data split if early stopping
1986
+ is enabled.
1987
+ Pass an int for reproducible output across multiple function calls.
1988
+ See :term:`Glossary <random_state>`.
1989
+ class_weight : dict or 'balanced', default=None
1990
+ Weights associated with classes in the form `{class_label: weight}`.
1991
+ If not given, all classes are supposed to have weight one.
1992
+ The "balanced" mode uses the values of y to automatically adjust
1993
+ weights inversely proportional to class frequencies in the input data
1994
+ as `n_samples / (n_classes * np.bincount(y))`.
1995
+ Note that these weights will be multiplied with sample_weight (passed
1996
+ through the fit method) if `sample_weight` is specified.
1997
+
1998
+ .. versionadded:: 1.2
1999
+
2000
+ Attributes
2001
+ ----------
2002
+ classes_ : array, shape = (n_classes,)
2003
+ Class labels.
2004
+ do_early_stopping_ : bool
2005
+ Indicates whether early stopping is used during training.
2006
+ n_iter_ : int
2007
+ The number of iterations as selected by early stopping, depending on
2008
+ the `early_stopping` parameter. Otherwise it corresponds to max_iter.
2009
+ n_trees_per_iteration_ : int
2010
+ The number of trees that are built at each iteration. This is equal to 1
2011
+ for binary classification, and to ``n_classes`` for multiclass
2012
+ classification.
2013
+ train_score_ : ndarray, shape (n_iter_+1,)
2014
+ The scores at each iteration on the training data. The first entry
2015
+ is the score of the ensemble before the first iteration. Scores are
2016
+ computed according to the ``scoring`` parameter. If ``scoring`` is
2017
+ not 'loss', scores are computed on a subset of at most 10 000
2018
+ samples. Empty if no early stopping.
2019
+ validation_score_ : ndarray, shape (n_iter_+1,)
2020
+ The scores at each iteration on the held-out validation data. The
2021
+ first entry is the score of the ensemble before the first iteration.
2022
+ Scores are computed according to the ``scoring`` parameter. Empty if
2023
+ no early stopping or if ``validation_fraction`` is None.
2024
+ is_categorical_ : ndarray, shape (n_features, ) or None
2025
+ Boolean mask for the categorical features. ``None`` if there are no
2026
+ categorical features.
2027
+ n_features_in_ : int
2028
+ Number of features seen during :term:`fit`.
2029
+
2030
+ .. versionadded:: 0.24
2031
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
2032
+ Names of features seen during :term:`fit`. Defined only when `X`
2033
+ has feature names that are all strings.
2034
+
2035
+ .. versionadded:: 1.0
2036
+
2037
+ See Also
2038
+ --------
2039
+ GradientBoostingClassifier : Exact gradient boosting method that does not
2040
+ scale as well on datasets with a large number of samples.
2041
+ sklearn.tree.DecisionTreeClassifier : A decision tree classifier.
2042
+ RandomForestClassifier : A meta-estimator that fits a number of decision
2043
+ tree classifiers on various sub-samples of the dataset and uses
2044
+ averaging to improve the predictive accuracy and control over-fitting.
2045
+ AdaBoostClassifier : A meta-estimator that begins by fitting a classifier
2046
+ on the original dataset and then fits additional copies of the
2047
+ classifier on the same dataset where the weights of incorrectly
2048
+ classified instances are adjusted such that subsequent classifiers
2049
+ focus more on difficult cases.
2050
+
2051
+ Examples
2052
+ --------
2053
+ >>> from sklearn.ensemble import HistGradientBoostingClassifier
2054
+ >>> from sklearn.datasets import load_iris
2055
+ >>> X, y = load_iris(return_X_y=True)
2056
+ >>> clf = HistGradientBoostingClassifier().fit(X, y)
2057
+ >>> clf.score(X, y)
2058
+ 1.0
2059
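+
+ An illustrative check of the per-class probabilities (iris has three
+ classes, hence three columns):
+
+ >>> proba = clf.predict_proba(X[:2])
+ >>> proba.shape
+ (2, 3)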
+ """
2060
+
2061
+ _parameter_constraints: dict = {
2062
+ **BaseHistGradientBoosting._parameter_constraints,
2063
+ "loss": [StrOptions({"log_loss"}), BaseLoss],
2064
+ "class_weight": [dict, StrOptions({"balanced"}), None],
2065
+ }
2066
+
2067
+ def __init__(
2068
+ self,
2069
+ loss="log_loss",
2070
+ *,
2071
+ learning_rate=0.1,
2072
+ max_iter=100,
2073
+ max_leaf_nodes=31,
2074
+ max_depth=None,
2075
+ min_samples_leaf=20,
2076
+ l2_regularization=0.0,
2077
+ max_features=1.0,
2078
+ max_bins=255,
2079
+ categorical_features="warn",
2080
+ monotonic_cst=None,
2081
+ interaction_cst=None,
2082
+ warm_start=False,
2083
+ early_stopping="auto",
2084
+ scoring="loss",
2085
+ validation_fraction=0.1,
2086
+ n_iter_no_change=10,
2087
+ tol=1e-7,
2088
+ verbose=0,
2089
+ random_state=None,
2090
+ class_weight=None,
2091
+ ):
2092
+ super(HistGradientBoostingClassifier, self).__init__(
2093
+ loss=loss,
2094
+ learning_rate=learning_rate,
2095
+ max_iter=max_iter,
2096
+ max_leaf_nodes=max_leaf_nodes,
2097
+ max_depth=max_depth,
2098
+ min_samples_leaf=min_samples_leaf,
2099
+ l2_regularization=l2_regularization,
2100
+ max_features=max_features,
2101
+ max_bins=max_bins,
2102
+ categorical_features=categorical_features,
2103
+ monotonic_cst=monotonic_cst,
2104
+ interaction_cst=interaction_cst,
2105
+ warm_start=warm_start,
2106
+ early_stopping=early_stopping,
2107
+ scoring=scoring,
2108
+ validation_fraction=validation_fraction,
2109
+ n_iter_no_change=n_iter_no_change,
2110
+ tol=tol,
2111
+ verbose=verbose,
2112
+ random_state=random_state,
2113
+ )
2114
+ self.class_weight = class_weight
2115
+
2116
+ def _finalize_sample_weight(self, sample_weight, y):
2117
+ """Adjust sample_weights with class_weights."""
2118
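+ # With class_weight="balanced", compute_sample_weight gives each sample
+ # the weight n_samples / (n_classes * np.bincount(y)) of its class, so
+ # rarer classes are up-weighted (see the class_weight docstring above).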
+ if self.class_weight is None:
2119
+ return sample_weight
2120
+
2121
+ expanded_class_weight = compute_sample_weight(self.class_weight, y)
2122
+
2123
+ if sample_weight is not None:
2124
+ return sample_weight * expanded_class_weight
2125
+ else:
2126
+ return expanded_class_weight
2127
+
2128
+ def predict(self, X):
2129
+ """Predict classes for X.
2130
+
2131
+ Parameters
2132
+ ----------
2133
+ X : array-like, shape (n_samples, n_features)
2134
+ The input samples.
2135
+
2136
+ Returns
2137
+ -------
2138
+ y : ndarray, shape (n_samples,)
2139
+ The predicted classes.
2140
+ """
2141
+ # TODO: This could be done in parallel
2142
+ encoded_classes = np.argmax(self.predict_proba(X), axis=1)
2143
+ return self.classes_[encoded_classes]
2144
+
2145
+ def staged_predict(self, X):
2146
+ """Predict classes at each iteration.
2147
+
2148
+ This method allows monitoring (i.e. determine error on testing set)
2149
+ after each stage.
2150
+
2151
+ .. versionadded:: 0.24
2152
+
2153
+ Parameters
2154
+ ----------
2155
+ X : array-like of shape (n_samples, n_features)
2156
+ The input samples.
2157
+
2158
+ Yields
2159
+ ------
2160
+ y : generator of ndarray of shape (n_samples,)
2161
+ The predicted classes of the input samples, for each iteration.
2162
+ """
2163
+ for proba in self.staged_predict_proba(X):
2164
+ encoded_classes = np.argmax(proba, axis=1)
2165
+ yield self.classes_.take(encoded_classes, axis=0)
2166
+
2167
+ def predict_proba(self, X):
2168
+ """Predict class probabilities for X.
2169
+
2170
+ Parameters
2171
+ ----------
2172
+ X : array-like, shape (n_samples, n_features)
2173
+ The input samples.
2174
+
2175
+ Returns
2176
+ -------
2177
+ p : ndarray, shape (n_samples, n_classes)
2178
+ The class probabilities of the input samples.
2179
+ """
2180
+ raw_predictions = self._raw_predict(X)
2181
+ return self._loss.predict_proba(raw_predictions)
2182
+
2183
+ def staged_predict_proba(self, X):
2184
+ """Predict class probabilities at each iteration.
2185
+
2186
+ This method allows monitoring (i.e. determine error on testing set)
2187
+ after each stage.
2188
+
2189
+ Parameters
2190
+ ----------
2191
+ X : array-like of shape (n_samples, n_features)
2192
+ The input samples.
2193
+
2194
+ Yields
2195
+ ------
2196
+ y : generator of ndarray of shape (n_samples,)
2197
+ The predicted class probabilities of the input samples,
2198
+ for each iteration.
2199
+ """
2200
+ for raw_predictions in self._staged_raw_predict(X):
2201
+ yield self._loss.predict_proba(raw_predictions)
2202
+
2203
+ def decision_function(self, X):
2204
+ """Compute the decision function of ``X``.
2205
+
2206
+ Parameters
2207
+ ----------
2208
+ X : array-like, shape (n_samples, n_features)
2209
+ The input samples.
2210
+
2211
+ Returns
2212
+ -------
2213
+ decision : ndarray, shape (n_samples,) or \
2214
+ (n_samples, n_trees_per_iteration)
2215
+ The raw predicted values (i.e. the sum of the trees' leaves) for
2216
+ each sample. n_trees_per_iteration is equal to the number of
2217
+ classes in multiclass classification.
2218
+ """
2219
+ decision = self._raw_predict(X)
2220
+ if decision.shape[1] == 1:
2221
+ decision = decision.ravel()
2222
+ return decision
2223
+
2224
+ def staged_decision_function(self, X):
2225
+ """Compute decision function of ``X`` for each iteration.
2226
+
2227
+ This method allows monitoring (i.e. determine error on testing set)
2228
+ after each stage.
2229
+
2230
+ Parameters
2231
+ ----------
2232
+ X : array-like of shape (n_samples, n_features)
2233
+ The input samples.
2234
+
2235
+ Yields
2236
+ ------
2237
+ decision : generator of ndarray of shape (n_samples,) or \
2238
+ (n_samples, n_trees_per_iteration)
2239
+ The decision function of the input samples, which corresponds to
2240
+ the raw values predicted from the trees of the ensemble. The
2241
+ classes correspond to those in the attribute :term:`classes_`.
2242
+ """
2243
+ for staged_decision in self._staged_raw_predict(X):
2244
+ if staged_decision.shape[1] == 1:
2245
+ staged_decision = staged_decision.ravel()
2246
+ yield staged_decision
2247
+
2248
+ def _encode_y(self, y):
2249
+ # Encode classes into 0 ... n_classes - 1 and set the attributes classes_
2250
+ # and n_trees_per_iteration_
2251
+ check_classification_targets(y)
2252
+
2253
+ label_encoder = LabelEncoder()
2254
+ encoded_y = label_encoder.fit_transform(y)
2255
+ self.classes_ = label_encoder.classes_
2256
+ n_classes = self.classes_.shape[0]
2257
+ # only 1 tree for binary classification. For multiclass classification,
2258
+ # we build 1 tree per class.
2259
+ self.n_trees_per_iteration_ = 1 if n_classes <= 2 else n_classes
2260
+ encoded_y = encoded_y.astype(Y_DTYPE, copy=False)
2261
+ return encoded_y
2262
+
2263
+ def _get_loss(self, sample_weight):
2264
+ # At this point self.loss == "log_loss"
2265
+ if self.n_trees_per_iteration_ == 1:
2266
+ return HalfBinomialLoss(sample_weight=sample_weight)
2267
+ else:
2268
+ return HalfMultinomialLoss(
2269
+ sample_weight=sample_weight, n_classes=self.n_trees_per_iteration_
2270
+ )
llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/grower.py ADDED
@@ -0,0 +1,798 @@
1
+ """
2
+ This module contains the TreeGrower class.
3
+
4
+ TreeGrower builds a regression tree fitting a Newton-Raphson step, based on
5
+ the gradients and hessians of the training data.
6
+ """
7
+ # Author: Nicolas Hug
8
+
9
+ import numbers
10
+ from heapq import heappop, heappush
11
+ from timeit import default_timer as time
12
+
13
+ import numpy as np
14
+
15
+ from sklearn.utils._openmp_helpers import _openmp_effective_n_threads
16
+
17
+ from ._bitset import set_raw_bitset_from_binned_bitset
18
+ from .common import (
19
+ PREDICTOR_RECORD_DTYPE,
20
+ X_BITSET_INNER_DTYPE,
21
+ Y_DTYPE,
22
+ MonotonicConstraint,
23
+ )
24
+ from .histogram import HistogramBuilder
25
+ from .predictor import TreePredictor
26
+ from .splitting import Splitter
27
+ from .utils import sum_parallel
28
+
29
+ EPS = np.finfo(Y_DTYPE).eps # to avoid zero division errors
30
+
31
+
32
+ class TreeNode:
33
+ """Tree Node class used in TreeGrower.
34
+
35
+ This isn't used for prediction purposes, only for training (see
36
+ TreePredictor).
37
+
38
+ Parameters
39
+ ----------
40
+ depth : int
41
+ The depth of the node, i.e. its distance from the root.
42
+ sample_indices : ndarray of shape (n_samples_at_node,), dtype=np.uint32
43
+ The indices of the samples at the node.
44
+ sum_gradients : float
45
+ The sum of the gradients of the samples at the node.
46
+ sum_hessians : float
47
+ The sum of the hessians of the samples at the node.
48
+
49
+ Attributes
50
+ ----------
51
+ depth : int
52
+ The depth of the node, i.e. its distance from the root.
53
+ sample_indices : ndarray of shape (n_samples_at_node,), dtype=np.uint32
54
+ The indices of the samples at the node.
55
+ sum_gradients : float
56
+ The sum of the gradients of the samples at the node.
57
+ sum_hessians : float
58
+ The sum of the hessians of the samples at the node.
59
+ split_info : SplitInfo or None
60
+ The result of the split evaluation.
61
+ is_leaf : bool
62
+ True if the node is a leaf.
63
+ left_child : TreeNode or None
64
+ The left child of the node. None for leaves.
65
+ right_child : TreeNode or None
66
+ The right child of the node. None for leaves.
67
+ value : float or None
68
+ The value of the leaf, as computed in finalize_leaf(). None for
69
+ non-leaf nodes.
70
+ partition_start : int
71
+ start position of the node's sample_indices in splitter.partition.
72
+ partition_stop : int
73
+ stop position of the node's sample_indices in splitter.partition.
74
+ allowed_features : None or ndarray, dtype=int
75
+ Indices of features allowed to split for children.
76
+ interaction_cst_indices : None or list of ints
77
+ Indices of the interaction sets that have to be applied on splits of
78
+ child nodes. The fewer the sets, the stronger the constraint, as fewer sets
79
+ contain fewer features.
80
+ children_lower_bound : float
81
+ children_upper_bound : float
82
+ """
83
+
84
+ split_info = None
85
+ left_child = None
86
+ right_child = None
87
+ histograms = None
88
+
89
+ # start and stop indices of the node in the splitter.partition
90
+ # array. Concretely,
91
+ # self.sample_indices = view(self.splitter.partition[start:stop])
92
+ # Please see the comments about splitter.partition and
93
+ # splitter.split_indices for more info about this design.
94
+ # These 2 attributes are only used in _update_raw_prediction, because we
95
+ # need to iterate over the leaves and I don't know how to efficiently
96
+ # store the sample_indices views because they're all of different sizes.
97
+ partition_start = 0
98
+ partition_stop = 0
99
+
100
+ def __init__(self, depth, sample_indices, sum_gradients, sum_hessians, value=None):
101
+ self.depth = depth
102
+ self.sample_indices = sample_indices
103
+ self.n_samples = sample_indices.shape[0]
104
+ self.sum_gradients = sum_gradients
105
+ self.sum_hessians = sum_hessians
106
+ self.value = value
107
+ self.is_leaf = False
108
+ self.allowed_features = None
109
+ self.interaction_cst_indices = None
110
+ self.set_children_bounds(float("-inf"), float("+inf"))
111
+
112
+ def set_children_bounds(self, lower, upper):
113
+ """Set children values bounds to respect monotonic constraints."""
114
+
115
+ # These are bounds for the node's *children* values, not the node's
116
+ # value. The bounds are used in the splitter when considering potential
117
+ # left and right child.
118
+ self.children_lower_bound = lower
119
+ self.children_upper_bound = upper
120
+
121
+ def __lt__(self, other_node):
122
+ """Comparison for priority queue.
123
+
124
+ Nodes with high gain are higher priority than nodes with low gain.
125
+
126
+ heapq.heappush only need the '<' operator.
127
+ heapq.heappop take the smallest item first (smaller is higher
128
+ priority).
129
+
130
+ Parameters
131
+ ----------
132
+ other_node : TreeNode
133
+ The node to compare with.
134
+ """
135
+ return self.split_info.gain > other_node.split_info.gain
136
+
137
+
138
+ class TreeGrower:
139
+ """Tree grower class used to build a tree.
140
+
141
+ The tree is fitted to predict the values of a Newton-Raphson step. The
142
+ splits are considered in a best-first fashion, and the quality of a
143
+ split is defined in splitting._split_gain.
144
+
145
+ Parameters
146
+ ----------
147
+ X_binned : ndarray of shape (n_samples, n_features), dtype=np.uint8
148
+ The binned input samples. Must be Fortran-aligned.
149
+ gradients : ndarray of shape (n_samples,)
150
+ The gradients of each training sample. Those are the gradients of the
151
+ loss w.r.t the predictions, evaluated at iteration ``i - 1``.
152
+ hessians : ndarray of shape (n_samples,)
153
+ The hessians of each training sample. Those are the hessians of the
154
+ loss w.r.t the predictions, evaluated at iteration ``i - 1``.
155
+ max_leaf_nodes : int, default=None
156
+ The maximum number of leaves for each tree. If None, there is no
157
+ maximum limit.
158
+ max_depth : int, default=None
159
+ The maximum depth of each tree. The depth of a tree is the number of
160
+ edges to go from the root to the deepest leaf.
161
+ Depth isn't constrained by default.
162
+ min_samples_leaf : int, default=20
163
+ The minimum number of samples per leaf.
164
+ min_gain_to_split : float, default=0.
165
+ The minimum gain needed to split a node. Splits with lower gain will
166
+ be ignored.
167
+ min_hessian_to_split : float, default=1e-3
168
+ The minimum sum of hessians needed in each node. Splits that result in
169
+ at least one child having a sum of hessians less than
170
+ ``min_hessian_to_split`` are discarded.
171
+ n_bins : int, default=256
172
+ The total number of bins, including the bin for missing values. Used
173
+ to define the shape of the histograms.
174
+ n_bins_non_missing : ndarray, dtype=np.uint32, default=None
175
+ For each feature, gives the number of bins actually used for
176
+ non-missing values. For features with a lot of unique values, this
177
+ is equal to ``n_bins - 1``. If it's an int, all features are
178
+ considered to have the same number of bins. If None, all features
179
+ are considered to have ``n_bins - 1`` bins.
180
+ has_missing_values : bool or ndarray, dtype=bool, default=False
181
+ Whether each feature contains missing values (in the training data).
182
+ If it's a bool, the same value is used for all features.
183
+ is_categorical : ndarray of bool of shape (n_features,), default=None
184
+ Indicates categorical features.
185
+ monotonic_cst : array-like of int of shape (n_features,), dtype=int, default=None
186
+ Indicates the monotonic constraint to enforce on each feature.
187
+ - 1: monotonic increase
188
+ - 0: no constraint
189
+ - -1: monotonic decrease
190
+
191
+ Read more in the :ref:`User Guide <monotonic_cst_gbdt>`.
192
+ interaction_cst : list of sets of integers, default=None
193
+ List of interaction constraints.
194
+ l2_regularization : float, default=0.
195
+ The L2 regularization parameter.
196
+ feature_fraction_per_split : float, default=1
197
+ Proportion of randomly chosen features in each and every node split.
198
+ This is a form of regularization, smaller values make the trees weaker
199
+ learners and might prevent overfitting.
200
+ rng : Generator
201
+ Numpy random Generator used for feature subsampling.
202
+ shrinkage : float, default=1.
203
+ The shrinkage parameter to apply to the leaves values, also known as
204
+ learning rate.
205
+ n_threads : int, default=None
206
+ Number of OpenMP threads to use. `_openmp_effective_n_threads` is called
207
+ to determine the effective number of threads use, which takes cgroups CPU
208
+ quotes into account. See the docstring of `_openmp_effective_n_threads`
209
+ for details.
210
+
211
+ Attributes
212
+ ----------
213
+ histogram_builder : HistogramBuilder
214
+ splitter : Splitter
215
+ root : TreeNode
216
+ finalized_leaves : list of TreeNode
217
+ splittable_nodes : list of TreeNode
218
+ missing_values_bin_idx : int
219
+ Equals n_bins - 1
220
+ n_categorical_splits : int
221
+ n_features : int
222
+ n_nodes : int
223
+ total_find_split_time : float
224
+ Time spent finding the best splits
225
+ total_compute_hist_time : float
226
+ Time spent computing histograms
227
+ total_apply_split_time : float
228
+ Time spent splitting nodes
229
+ with_monotonic_cst : bool
230
+ Whether there are monotonic constraints that apply. False iff monotonic_cst is
231
+ None.
232
+ """
233
+
234
+ def __init__(
235
+ self,
236
+ X_binned,
237
+ gradients,
238
+ hessians,
239
+ max_leaf_nodes=None,
240
+ max_depth=None,
241
+ min_samples_leaf=20,
242
+ min_gain_to_split=0.0,
243
+ min_hessian_to_split=1e-3,
244
+ n_bins=256,
245
+ n_bins_non_missing=None,
246
+ has_missing_values=False,
247
+ is_categorical=None,
248
+ monotonic_cst=None,
249
+ interaction_cst=None,
250
+ l2_regularization=0.0,
251
+ feature_fraction_per_split=1.0,
252
+ rng=np.random.default_rng(),
253
+ shrinkage=1.0,
254
+ n_threads=None,
255
+ ):
256
+ self._validate_parameters(
257
+ X_binned,
258
+ min_gain_to_split,
259
+ min_hessian_to_split,
260
+ )
261
+ n_threads = _openmp_effective_n_threads(n_threads)
262
+
263
+ if n_bins_non_missing is None:
264
+ n_bins_non_missing = n_bins - 1
265
+
266
+ if isinstance(n_bins_non_missing, numbers.Integral):
267
+ n_bins_non_missing = np.array(
268
+ [n_bins_non_missing] * X_binned.shape[1], dtype=np.uint32
269
+ )
270
+ else:
271
+ n_bins_non_missing = np.asarray(n_bins_non_missing, dtype=np.uint32)
272
+
273
+ if isinstance(has_missing_values, bool):
274
+ has_missing_values = [has_missing_values] * X_binned.shape[1]
275
+ has_missing_values = np.asarray(has_missing_values, dtype=np.uint8)
276
+
277
+ # `monotonic_cst` validation is done in _validate_monotonic_cst
278
+ # at the estimator level and therefore the following should not be
279
+ # needed when using the public API.
280
+ if monotonic_cst is None:
281
+ monotonic_cst = np.full(
282
+ shape=X_binned.shape[1],
283
+ fill_value=MonotonicConstraint.NO_CST,
284
+ dtype=np.int8,
285
+ )
286
+ else:
287
+ monotonic_cst = np.asarray(monotonic_cst, dtype=np.int8)
288
+ self.with_monotonic_cst = np.any(monotonic_cst != MonotonicConstraint.NO_CST)
289
+
290
+ if is_categorical is None:
291
+ is_categorical = np.zeros(shape=X_binned.shape[1], dtype=np.uint8)
292
+ else:
293
+ is_categorical = np.asarray(is_categorical, dtype=np.uint8)
294
+
295
+ if np.any(
296
+ np.logical_and(
297
+ is_categorical == 1, monotonic_cst != MonotonicConstraint.NO_CST
298
+ )
299
+ ):
300
+ raise ValueError("Categorical features cannot have monotonic constraints.")
301
+
302
+ hessians_are_constant = hessians.shape[0] == 1
303
+ self.histogram_builder = HistogramBuilder(
304
+ X_binned, n_bins, gradients, hessians, hessians_are_constant, n_threads
305
+ )
306
+ missing_values_bin_idx = n_bins - 1
307
+ self.splitter = Splitter(
308
+ X_binned=X_binned,
309
+ n_bins_non_missing=n_bins_non_missing,
310
+ missing_values_bin_idx=missing_values_bin_idx,
311
+ has_missing_values=has_missing_values,
312
+ is_categorical=is_categorical,
313
+ monotonic_cst=monotonic_cst,
314
+ l2_regularization=l2_regularization,
315
+ min_hessian_to_split=min_hessian_to_split,
316
+ min_samples_leaf=min_samples_leaf,
317
+ min_gain_to_split=min_gain_to_split,
318
+ hessians_are_constant=hessians_are_constant,
319
+ feature_fraction_per_split=feature_fraction_per_split,
320
+ rng=rng,
321
+ n_threads=n_threads,
322
+ )
323
+ self.X_binned = X_binned
324
+ self.max_leaf_nodes = max_leaf_nodes
325
+ self.max_depth = max_depth
326
+ self.min_samples_leaf = min_samples_leaf
327
+ self.min_gain_to_split = min_gain_to_split
328
+ self.n_bins_non_missing = n_bins_non_missing
329
+ self.missing_values_bin_idx = missing_values_bin_idx
330
+ self.has_missing_values = has_missing_values
331
+ self.is_categorical = is_categorical
332
+ self.monotonic_cst = monotonic_cst
333
+ self.interaction_cst = interaction_cst
334
+ self.l2_regularization = l2_regularization
335
+ self.shrinkage = shrinkage
336
+ self.n_features = X_binned.shape[1]
337
+ self.n_threads = n_threads
338
+ self.splittable_nodes = []
339
+ self.finalized_leaves = []
340
+ self.total_find_split_time = 0.0 # time spent finding the best splits
341
+ self.total_compute_hist_time = 0.0 # time spent computing histograms
342
+ self.total_apply_split_time = 0.0 # time spent splitting nodes
343
+ self.n_categorical_splits = 0
344
+ self._intilialize_root(gradients, hessians, hessians_are_constant)
345
+ self.n_nodes = 1
346
+
347
+ def _validate_parameters(
348
+ self,
349
+ X_binned,
350
+ min_gain_to_split,
351
+ min_hessian_to_split,
352
+ ):
353
+ """Validate parameters passed to __init__.
354
+
355
+ Also validate parameters passed to splitter.
356
+ """
357
+ if X_binned.dtype != np.uint8:
358
+ raise NotImplementedError("X_binned must be of type uint8.")
359
+ if not X_binned.flags.f_contiguous:
360
+ raise ValueError(
361
+ "X_binned should be passed as Fortran contiguous "
362
+ "array for maximum efficiency."
363
+ )
364
+ if min_gain_to_split < 0:
365
+ raise ValueError(
366
+ "min_gain_to_split={} must be positive.".format(min_gain_to_split)
367
+ )
368
+ if min_hessian_to_split < 0:
369
+ raise ValueError(
370
+ "min_hessian_to_split={} must be positive.".format(min_hessian_to_split)
371
+ )
372
+
373
+ def grow(self):
374
+ """Grow the tree, from root to leaves."""
375
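+ # Best-first (leaf-wise) growth: repeatedly split the splittable node
+ # with the highest gain (see TreeNode.__lt__) until no splittable node
+ # remains or a constraint such as max_leaf_nodes / max_depth is hit.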
+ while self.splittable_nodes:
376
+ self.split_next()
377
+
378
+ self._apply_shrinkage()
379
+
380
+ def _apply_shrinkage(self):
381
+ """Multiply leaves values by shrinkage parameter.
382
+
383
+ This must be done at the very end of the growing process. If this were
384
+ done during the growing process e.g. in finalize_leaf(), then a leaf
385
+ would be shrunk but its sibling would potentially not be (if it's a
386
+ non-leaf), which would lead to a wrong computation of the 'middle'
387
+ value needed to enforce the monotonic constraints.
388
+ """
389
+ for leaf in self.finalized_leaves:
390
+ leaf.value *= self.shrinkage
391
+
392
+ def _intilialize_root(self, gradients, hessians, hessians_are_constant):
393
+ """Initialize root node and finalize it if needed."""
394
+ n_samples = self.X_binned.shape[0]
395
+ depth = 0
396
+ sum_gradients = sum_parallel(gradients, self.n_threads)
397
+ if self.histogram_builder.hessians_are_constant:
398
+ sum_hessians = hessians[0] * n_samples
399
+ else:
400
+ sum_hessians = sum_parallel(hessians, self.n_threads)
401
+ self.root = TreeNode(
402
+ depth=depth,
403
+ sample_indices=self.splitter.partition,
404
+ sum_gradients=sum_gradients,
405
+ sum_hessians=sum_hessians,
406
+ value=0,
407
+ )
408
+
409
+ self.root.partition_start = 0
410
+ self.root.partition_stop = n_samples
411
+
412
+ if self.root.n_samples < 2 * self.min_samples_leaf:
413
+ # Do not even bother computing any splitting statistics.
414
+ self._finalize_leaf(self.root)
415
+ return
416
+ if sum_hessians < self.splitter.min_hessian_to_split:
417
+ self._finalize_leaf(self.root)
418
+ return
419
+
420
+ if self.interaction_cst is not None:
421
+ self.root.interaction_cst_indices = range(len(self.interaction_cst))
422
+ allowed_features = set().union(*self.interaction_cst)
423
+ self.root.allowed_features = np.fromiter(
424
+ allowed_features, dtype=np.uint32, count=len(allowed_features)
425
+ )
426
+
427
+ tic = time()
428
+ self.root.histograms = self.histogram_builder.compute_histograms_brute(
429
+ self.root.sample_indices, self.root.allowed_features
430
+ )
431
+ self.total_compute_hist_time += time() - tic
432
+
433
+ tic = time()
434
+ self._compute_best_split_and_push(self.root)
435
+ self.total_find_split_time += time() - tic
436
+
437
+ def _compute_best_split_and_push(self, node):
438
+ """Compute the best possible split (SplitInfo) of a given node.
439
+
440
+ Also push it in the heap of splittable nodes if gain isn't zero.
441
+ The gain of a node is 0 if either all the leaves are pure
442
+ (best gain = 0), or if no split would satisfy the constraints,
443
+ (min_hessians_to_split, min_gain_to_split, min_samples_leaf)
444
+ """
445
+
446
+ node.split_info = self.splitter.find_node_split(
447
+ n_samples=node.n_samples,
448
+ histograms=node.histograms,
449
+ sum_gradients=node.sum_gradients,
450
+ sum_hessians=node.sum_hessians,
451
+ value=node.value,
452
+ lower_bound=node.children_lower_bound,
453
+ upper_bound=node.children_upper_bound,
454
+ allowed_features=node.allowed_features,
455
+ )
456
+
457
+ if node.split_info.gain <= 0: # no valid split
458
+ self._finalize_leaf(node)
459
+ else:
460
+ heappush(self.splittable_nodes, node)
461
+
462
+ def split_next(self):
463
+ """Split the node with highest potential gain.
464
+
465
+ Returns
466
+ -------
467
+ left : TreeNode
468
+ The resulting left child.
469
+ right : TreeNode
470
+ The resulting right child.
471
+ """
472
+ # Consider the node with the highest loss reduction (a.k.a. gain)
473
+ node = heappop(self.splittable_nodes)
474
+
475
+ tic = time()
476
+ (
477
+ sample_indices_left,
478
+ sample_indices_right,
479
+ right_child_pos,
480
+ ) = self.splitter.split_indices(node.split_info, node.sample_indices)
481
+ self.total_apply_split_time += time() - tic
482
+
483
+ depth = node.depth + 1
484
+ n_leaf_nodes = len(self.finalized_leaves) + len(self.splittable_nodes)
485
+ n_leaf_nodes += 2
486
+
487
+ left_child_node = TreeNode(
488
+ depth,
489
+ sample_indices_left,
490
+ node.split_info.sum_gradient_left,
491
+ node.split_info.sum_hessian_left,
492
+ value=node.split_info.value_left,
493
+ )
494
+ right_child_node = TreeNode(
495
+ depth,
496
+ sample_indices_right,
497
+ node.split_info.sum_gradient_right,
498
+ node.split_info.sum_hessian_right,
499
+ value=node.split_info.value_right,
500
+ )
501
+
502
+ node.right_child = right_child_node
503
+ node.left_child = left_child_node
504
+
505
+ # set start and stop indices
506
+ left_child_node.partition_start = node.partition_start
507
+ left_child_node.partition_stop = node.partition_start + right_child_pos
508
+ right_child_node.partition_start = left_child_node.partition_stop
509
+ right_child_node.partition_stop = node.partition_stop
510
+
511
+ # set interaction constraints (the indices of the constraints sets)
512
+ if self.interaction_cst is not None:
513
+ # Calculate allowed_features and interaction_cst_indices only once. Child
514
+ # nodes inherit them before they get split.
515
+ (
516
+ left_child_node.allowed_features,
517
+ left_child_node.interaction_cst_indices,
518
+ ) = self._compute_interactions(node)
519
+ right_child_node.interaction_cst_indices = (
520
+ left_child_node.interaction_cst_indices
521
+ )
522
+ right_child_node.allowed_features = left_child_node.allowed_features
523
+
524
+ if not self.has_missing_values[node.split_info.feature_idx]:
525
+ # If no missing values are encountered at fit time, then samples
526
+ # with missing values during predict() will go to whichever child
527
+ # has the most samples.
528
+ node.split_info.missing_go_to_left = (
529
+ left_child_node.n_samples > right_child_node.n_samples
530
+ )
531
+
532
+ self.n_nodes += 2
533
+ self.n_categorical_splits += node.split_info.is_categorical
534
+
535
+ if self.max_leaf_nodes is not None and n_leaf_nodes == self.max_leaf_nodes:
536
+ self._finalize_leaf(left_child_node)
537
+ self._finalize_leaf(right_child_node)
538
+ self._finalize_splittable_nodes()
539
+ return left_child_node, right_child_node
540
+
541
+ if self.max_depth is not None and depth == self.max_depth:
542
+ self._finalize_leaf(left_child_node)
543
+ self._finalize_leaf(right_child_node)
544
+ return left_child_node, right_child_node
545
+
546
+ if left_child_node.n_samples < self.min_samples_leaf * 2:
547
+ self._finalize_leaf(left_child_node)
548
+ if right_child_node.n_samples < self.min_samples_leaf * 2:
549
+ self._finalize_leaf(right_child_node)
550
+
551
+ if self.with_monotonic_cst:
552
+ # Set value bounds for respecting monotonic constraints
553
+ # See test_nodes_values() for details
554
+ if (
555
+ self.monotonic_cst[node.split_info.feature_idx]
556
+ == MonotonicConstraint.NO_CST
557
+ ):
558
+ lower_left = lower_right = node.children_lower_bound
559
+ upper_left = upper_right = node.children_upper_bound
560
+ else:
561
+ mid = (left_child_node.value + right_child_node.value) / 2
562
+ if (
563
+ self.monotonic_cst[node.split_info.feature_idx]
564
+ == MonotonicConstraint.POS
565
+ ):
566
+ lower_left, upper_left = node.children_lower_bound, mid
567
+ lower_right, upper_right = mid, node.children_upper_bound
568
+ else: # NEG
569
+ lower_left, upper_left = mid, node.children_upper_bound
570
+ lower_right, upper_right = node.children_lower_bound, mid
571
+ left_child_node.set_children_bounds(lower_left, upper_left)
572
+ right_child_node.set_children_bounds(lower_right, upper_right)
573
+
574
+ # Compute histograms of children, and compute their best possible split
575
+ # (if needed)
576
+ should_split_left = not left_child_node.is_leaf
577
+ should_split_right = not right_child_node.is_leaf
578
+ if should_split_left or should_split_right:
579
+ # We will compute the histograms of both nodes even if one of them
580
+ # is a leaf, since computing the second histogram is very cheap
581
+ # (using histogram subtraction).
582
+ n_samples_left = left_child_node.sample_indices.shape[0]
583
+ n_samples_right = right_child_node.sample_indices.shape[0]
584
+ if n_samples_left < n_samples_right:
585
+ smallest_child = left_child_node
586
+ largest_child = right_child_node
587
+ else:
588
+ smallest_child = right_child_node
589
+ largest_child = left_child_node
590
+
591
+ # We use the brute O(n_samples) method on the child that has the
592
+ # smallest number of samples, and the subtraction trick O(n_bins)
593
+ # on the other one.
594
+ # Note that both left and right child have the same allowed_features.
595
+ tic = time()
596
+ smallest_child.histograms = self.histogram_builder.compute_histograms_brute(
597
+ smallest_child.sample_indices, smallest_child.allowed_features
598
+ )
599
+ largest_child.histograms = (
600
+ self.histogram_builder.compute_histograms_subtraction(
601
+ node.histograms,
602
+ smallest_child.histograms,
603
+ smallest_child.allowed_features,
604
+ )
605
+ )
606
+ # node.histograms is reused in largest_child.histograms. To break cyclic
607
+ # memory references and help garbage collection, we set it to None.
608
+ node.histograms = None
609
+ self.total_compute_hist_time += time() - tic
610
+
611
+ tic = time()
612
+ if should_split_left:
613
+ self._compute_best_split_and_push(left_child_node)
614
+ if should_split_right:
615
+ self._compute_best_split_and_push(right_child_node)
616
+ self.total_find_split_time += time() - tic
617
+
618
+ # Release memory used by histograms as they are no longer needed
619
+ # for leaf nodes since they won't be split.
620
+ for child in (left_child_node, right_child_node):
621
+ if child.is_leaf:
622
+ del child.histograms
623
+
624
+ # Release memory used by histograms as they are no longer needed for
625
+ # internal nodes once children histograms have been computed.
626
+ del node.histograms
627
+
628
+ return left_child_node, right_child_node
629
+
630
+ def _compute_interactions(self, node):
631
+ r"""Compute features allowed by interactions to be inherited by child nodes.
632
+
633
+ Example: Assume constraints [{0, 1}, {1, 2}].
634
+ 1 <- Both constraint groups could be applied from now on
635
+ / \
636
+ 1 2 <- Left split still fulfills both constraint groups.
637
+ / \ / \ Right split at feature 2 has only group {1, 2} from now on.
638
+
639
+ LightGBM uses the same logic for overlapping groups. See
640
+ https://github.com/microsoft/LightGBM/issues/4481 for details.
641
+
642
+ Parameters:
643
+ ----------
644
+ node : TreeNode
645
+ A node that might have children. Based on its feature_idx, the interaction
646
+ constraints for possible child nodes are computed.
647
+
648
+ Returns
649
+ -------
650
+ allowed_features : ndarray, dtype=uint32
651
+ Indices of features allowed to split for children.
652
+ interaction_cst_indices : list of ints
653
+ Indices of the interaction sets that have to be applied on splits of
654
+ child nodes. The fewer sets the stronger the constraint as fewer sets
655
+ contain fewer features.
656
+ """
657
+ # Note:
658
+ # - Case of no interactions is already captured before function call.
659
+ # - This is for nodes that are already split and have a
660
+ # node.split_info.feature_idx.
661
+ allowed_features = set()
662
+ interaction_cst_indices = []
663
+ for i in node.interaction_cst_indices:
664
+ if node.split_info.feature_idx in self.interaction_cst[i]:
665
+ interaction_cst_indices.append(i)
666
+ allowed_features.update(self.interaction_cst[i])
667
+ return (
668
+ np.fromiter(allowed_features, dtype=np.uint32, count=len(allowed_features)),
669
+ interaction_cst_indices,
670
+ )
671
+
672
+ def _finalize_leaf(self, node):
673
+ """Make node a leaf of the tree being grown."""
674
+
675
+ node.is_leaf = True
676
+ self.finalized_leaves.append(node)
677
+
678
+ def _finalize_splittable_nodes(self):
679
+ """Transform all splittable nodes into leaves.
680
+
681
+ Used when some constraint is met e.g. maximum number of leaves or
682
+ maximum depth."""
683
+ while len(self.splittable_nodes) > 0:
684
+ node = self.splittable_nodes.pop()
685
+ self._finalize_leaf(node)
686
+
687
+ def make_predictor(self, binning_thresholds):
688
+ """Make a TreePredictor object out of the current tree.
689
+
690
+ Parameters
691
+ ----------
692
+ binning_thresholds : array-like of floats
693
+ Corresponds to the bin_thresholds_ attribute of the BinMapper.
694
+ For each feature, this stores:
695
+
696
+ - the bin frontiers for continuous features
697
+ - the unique raw category values for categorical features
698
+
699
+ Returns
700
+ -------
701
+ A TreePredictor object.
702
+ """
703
+ predictor_nodes = np.zeros(self.n_nodes, dtype=PREDICTOR_RECORD_DTYPE)
704
+ binned_left_cat_bitsets = np.zeros(
705
+ (self.n_categorical_splits, 8), dtype=X_BITSET_INNER_DTYPE
706
+ )
707
+ raw_left_cat_bitsets = np.zeros(
708
+ (self.n_categorical_splits, 8), dtype=X_BITSET_INNER_DTYPE
709
+ )
710
+ _fill_predictor_arrays(
711
+ predictor_nodes,
712
+ binned_left_cat_bitsets,
713
+ raw_left_cat_bitsets,
714
+ self.root,
715
+ binning_thresholds,
716
+ self.n_bins_non_missing,
717
+ )
718
+ return TreePredictor(
719
+ predictor_nodes, binned_left_cat_bitsets, raw_left_cat_bitsets
720
+ )
721
+
722
+
723
+ def _fill_predictor_arrays(
724
+ predictor_nodes,
725
+ binned_left_cat_bitsets,
726
+ raw_left_cat_bitsets,
727
+ grower_node,
728
+ binning_thresholds,
729
+ n_bins_non_missing,
730
+ next_free_node_idx=0,
731
+ next_free_bitset_idx=0,
732
+ ):
733
+ """Helper used in make_predictor to set the TreePredictor fields."""
734
+ node = predictor_nodes[next_free_node_idx]
735
+ node["count"] = grower_node.n_samples
736
+ node["depth"] = grower_node.depth
737
+ if grower_node.split_info is not None:
738
+ node["gain"] = grower_node.split_info.gain
739
+ else:
740
+ node["gain"] = -1
741
+
742
+ node["value"] = grower_node.value
743
+
744
+ if grower_node.is_leaf:
745
+ # Leaf node
746
+ node["is_leaf"] = True
747
+ return next_free_node_idx + 1, next_free_bitset_idx
748
+
749
+ split_info = grower_node.split_info
750
+ feature_idx, bin_idx = split_info.feature_idx, split_info.bin_idx
751
+ node["feature_idx"] = feature_idx
752
+ node["bin_threshold"] = bin_idx
753
+ node["missing_go_to_left"] = split_info.missing_go_to_left
754
+ node["is_categorical"] = split_info.is_categorical
755
+
756
+ if split_info.bin_idx == n_bins_non_missing[feature_idx] - 1:
757
+ # Split is on the last non-missing bin: it's a "split on nans".
758
+ # All nans go to the right, the rest go to the left.
759
+ # Note: for categorical splits, bin_idx is 0 and we rely on the bitset
760
+ node["num_threshold"] = np.inf
761
+ elif split_info.is_categorical:
762
+ categories = binning_thresholds[feature_idx]
763
+ node["bitset_idx"] = next_free_bitset_idx
764
+ binned_left_cat_bitsets[next_free_bitset_idx] = split_info.left_cat_bitset
765
+ set_raw_bitset_from_binned_bitset(
766
+ raw_left_cat_bitsets[next_free_bitset_idx],
767
+ split_info.left_cat_bitset,
768
+ categories,
769
+ )
770
+ next_free_bitset_idx += 1
771
+ else:
772
+ node["num_threshold"] = binning_thresholds[feature_idx][bin_idx]
773
+
774
+ next_free_node_idx += 1
775
+
776
+ node["left"] = next_free_node_idx
777
+ next_free_node_idx, next_free_bitset_idx = _fill_predictor_arrays(
778
+ predictor_nodes,
779
+ binned_left_cat_bitsets,
780
+ raw_left_cat_bitsets,
781
+ grower_node.left_child,
782
+ binning_thresholds=binning_thresholds,
783
+ n_bins_non_missing=n_bins_non_missing,
784
+ next_free_node_idx=next_free_node_idx,
785
+ next_free_bitset_idx=next_free_bitset_idx,
786
+ )
787
+
788
+ node["right"] = next_free_node_idx
789
+ return _fill_predictor_arrays(
790
+ predictor_nodes,
791
+ binned_left_cat_bitsets,
792
+ raw_left_cat_bitsets,
793
+ grower_node.right_child,
794
+ binning_thresholds=binning_thresholds,
795
+ n_bins_non_missing=n_bins_non_missing,
796
+ next_free_node_idx=next_free_node_idx,
797
+ next_free_bitset_idx=next_free_bitset_idx,
798
+ )
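The docstring of _compute_interactions above describes how interaction-constraint groups are inherited: a child node keeps only the groups that contain the parent's split feature, and its allowed features are the union of those groups. A minimal standalone sketch of that set logic (plain Python, with a hypothetical helper name rather than the grower's actual code path):

import numpy as np

def allowed_after_split(interaction_cst, parent_groups, split_feature):
    """Keep the constraint groups containing split_feature and return
    (allowed feature indices for the children, surviving group indices)."""
    surviving = [i for i in parent_groups if split_feature in interaction_cst[i]]
    allowed = set().union(*(interaction_cst[i] for i in surviving))
    return (
        np.fromiter(sorted(allowed), dtype=np.uint32, count=len(allowed)),
        surviving,
    )

# Constraints [{0, 1}, {1, 2}]: after the root splits on feature 1 both groups
# survive; a child that then splits on feature 2 keeps only the group {1, 2}.
cst = [{0, 1}, {1, 2}]
features, groups = allowed_after_split(cst, parent_groups=[0, 1], split_feature=2)
print(features, groups)  # [1 2] [1]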
llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/histogram.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (328 kB).
 
llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/predictor.py ADDED
@@ -0,0 +1,144 @@
1
+ """
2
+ This module contains the TreePredictor class which is used for prediction.
3
+ """
4
+ # Author: Nicolas Hug
5
+
6
+ import numpy as np
7
+
8
+ from ._predictor import (
9
+ _compute_partial_dependence,
10
+ _predict_from_binned_data,
11
+ _predict_from_raw_data,
12
+ )
13
+ from .common import PREDICTOR_RECORD_DTYPE, Y_DTYPE
14
+
15
+
16
+ class TreePredictor:
17
+ """Tree class used for predictions.
18
+
19
+ Parameters
20
+ ----------
21
+ nodes : ndarray of PREDICTOR_RECORD_DTYPE
22
+ The nodes of the tree.
23
+ binned_left_cat_bitsets : ndarray of shape (n_categorical_splits, 8), dtype=uint32
24
+ Array of bitsets for binned categories used in predict_binned when a
25
+ split is categorical.
26
+ raw_left_cat_bitsets : ndarray of shape (n_categorical_splits, 8), dtype=uint32
27
+ Array of bitsets for raw categories used in predict when a split is
28
+ categorical.
29
+ """
30
+
31
+ def __init__(self, nodes, binned_left_cat_bitsets, raw_left_cat_bitsets):
32
+ self.nodes = nodes
33
+ self.binned_left_cat_bitsets = binned_left_cat_bitsets
34
+ self.raw_left_cat_bitsets = raw_left_cat_bitsets
35
+
36
+ def get_n_leaf_nodes(self):
37
+ """Return number of leaves."""
38
+ return int(self.nodes["is_leaf"].sum())
39
+
40
+ def get_max_depth(self):
41
+ """Return maximum depth among all leaves."""
42
+ return int(self.nodes["depth"].max())
43
+
44
+ def predict(self, X, known_cat_bitsets, f_idx_map, n_threads):
45
+ """Predict raw values for non-binned data.
46
+
47
+ Parameters
48
+ ----------
49
+ X : ndarray, shape (n_samples, n_features)
50
+ The input samples.
51
+
52
+ known_cat_bitsets : ndarray of shape (n_categorical_features, 8)
53
+ Array of bitsets of known categories, for each categorical feature.
54
+
55
+ f_idx_map : ndarray of shape (n_features,)
56
+ Map from original feature index to the corresponding index in the
57
+ known_cat_bitsets array.
58
+
59
+ n_threads : int
60
+ Number of OpenMP threads to use.
61
+
62
+ Returns
63
+ -------
64
+ y : ndarray, shape (n_samples,)
65
+ The raw predicted values.
66
+ """
67
+ out = np.empty(X.shape[0], dtype=Y_DTYPE)
68
+
69
+ _predict_from_raw_data(
70
+ self.nodes,
71
+ X,
72
+ self.raw_left_cat_bitsets,
73
+ known_cat_bitsets,
74
+ f_idx_map,
75
+ n_threads,
76
+ out,
77
+ )
78
+ return out
79
+
80
+ def predict_binned(self, X, missing_values_bin_idx, n_threads):
81
+ """Predict raw values for binned data.
82
+
83
+ Parameters
84
+ ----------
85
+ X : ndarray, shape (n_samples, n_features)
86
+ The input samples.
87
+ missing_values_bin_idx : uint8
88
+ Index of the bin that is used for missing values. This is the
89
+ index of the last bin and is always equal to max_bins (as passed
90
+ to the GBDT classes), or equivalently to n_bins - 1.
91
+ n_threads : int
92
+ Number of OpenMP threads to use.
93
+
94
+ Returns
95
+ -------
96
+ y : ndarray, shape (n_samples,)
97
+ The raw predicted values.
98
+ """
99
+ out = np.empty(X.shape[0], dtype=Y_DTYPE)
100
+ _predict_from_binned_data(
101
+ self.nodes,
102
+ X,
103
+ self.binned_left_cat_bitsets,
104
+ missing_values_bin_idx,
105
+ n_threads,
106
+ out,
107
+ )
108
+ return out
109
+
110
+ def compute_partial_dependence(self, grid, target_features, out):
111
+ """Fast partial dependence computation.
112
+
113
+ Parameters
114
+ ----------
115
+ grid : ndarray, shape (n_samples, n_target_features)
116
+ The grid points on which the partial dependence should be
117
+ evaluated.
118
+ target_features : ndarray, shape (n_target_features)
119
+ The set of target features for which the partial dependence
120
+ should be evaluated.
121
+ out : ndarray, shape (n_samples)
122
+ The value of the partial dependence function on each grid
123
+ point.
124
+ """
125
+ _compute_partial_dependence(self.nodes, grid, target_features, out)
126
+
127
+ def __setstate__(self, state):
128
+ try:
129
+ super().__setstate__(state)
130
+ except AttributeError:
131
+ self.__dict__.update(state)
132
+
133
+ # The dtype of feature_idx is np.intp which is platform dependent. Here, we
134
+ # make sure that saving and loading on different bitness systems works without
135
+ # errors. For instance, on a 64 bit Python runtime, np.intp = np.int64,
136
+ # while on 32 bit np.intp = np.int32.
137
+ #
138
+ # TODO: consider always using platform agnostic dtypes for fitted
139
+ # estimator attributes. For this particular estimator, this would
140
+ # mean replacing the intp field of PREDICTOR_RECORD_DTYPE by an int32
141
+ # field. Ideally this should be done consistently throughout
142
+ # scikit-learn along with a common test.
143
+ if self.nodes.dtype != PREDICTOR_RECORD_DTYPE:
144
+ self.nodes = self.nodes.astype(PREDICTOR_RECORD_DTYPE, casting="same_kind")
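For illustration, the traversal that the node fields above encode (is_leaf, feature_idx, num_threshold, missing_go_to_left, left, right, value) can be sketched in plain Python for a single sample with numerical splits only. The real work is done by the Cython _predict_from_raw_data routine, which also handles categorical bitsets and threading; the reduced NODE_DTYPE below is a made-up subset of PREDICTOR_RECORD_DTYPE, used only to keep the sketch self-contained.

import numpy as np

# Reduced node record: only the fields needed for numerical splits.
NODE_DTYPE = np.dtype(
    [
        ("value", np.float64),
        ("feature_idx", np.intp),
        ("num_threshold", np.float64),
        ("missing_go_to_left", np.uint8),
        ("left", np.uint32),
        ("right", np.uint32),
        ("is_leaf", np.uint8),
    ]
)

def predict_one(nodes, x):
    """Walk the node array for a single sample (numerical splits only)."""
    idx = 0
    while not nodes[idx]["is_leaf"]:
        node = nodes[idx]
        feature_value = x[node["feature_idx"]]
        if np.isnan(feature_value):
            go_left = bool(node["missing_go_to_left"])
        else:
            go_left = feature_value <= node["num_threshold"]
        idx = node["left"] if go_left else node["right"]
    return nodes[idx]["value"]

# A depth-1 tree: split on feature 0 at threshold 0.5, missing values go left.
nodes = np.zeros(3, dtype=NODE_DTYPE)
nodes[0] = (0.0, 0, 0.5, True, 1, 2, False)
nodes[1] = (-1.0, 0, 0.0, False, 0, 0, True)
nodes[2] = (+1.0, 0, 0.0, False, 0, 0, True)
print(predict_one(nodes, np.array([0.3])))     # -1.0
print(predict_one(nodes, np.array([np.nan])))  # -1.0 (missing goes left)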
llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/splitting.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (369 kB).
 
llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__init__.py ADDED
File without changes
llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/test_binning.py ADDED
@@ -0,0 +1,489 @@
1
+ import numpy as np
2
+ import pytest
3
+ from numpy.testing import assert_allclose, assert_array_equal
4
+
5
+ from sklearn.ensemble._hist_gradient_boosting.binning import (
6
+ _BinMapper,
7
+ _find_binning_thresholds,
8
+ _map_to_bins,
9
+ )
10
+ from sklearn.ensemble._hist_gradient_boosting.common import (
11
+ ALMOST_INF,
12
+ X_BINNED_DTYPE,
13
+ X_DTYPE,
14
+ )
15
+ from sklearn.utils._openmp_helpers import _openmp_effective_n_threads
16
+
17
+ n_threads = _openmp_effective_n_threads()
18
+
19
+
20
+ DATA = (
21
+ np.random.RandomState(42)
22
+ .normal(loc=[0, 10], scale=[1, 0.01], size=(int(1e6), 2))
23
+ .astype(X_DTYPE)
24
+ )
25
+
26
+
27
+ def test_find_binning_thresholds_regular_data():
28
+ data = np.linspace(0, 10, 1001)
29
+ bin_thresholds = _find_binning_thresholds(data, max_bins=10)
30
+ assert_allclose(bin_thresholds, [1, 2, 3, 4, 5, 6, 7, 8, 9])
31
+
32
+ bin_thresholds = _find_binning_thresholds(data, max_bins=5)
33
+ assert_allclose(bin_thresholds, [2, 4, 6, 8])
34
+
35
+
36
+ def test_find_binning_thresholds_small_regular_data():
37
+ data = np.linspace(0, 10, 11)
38
+
39
+ bin_thresholds = _find_binning_thresholds(data, max_bins=5)
40
+ assert_allclose(bin_thresholds, [2, 4, 6, 8])
41
+
42
+ bin_thresholds = _find_binning_thresholds(data, max_bins=10)
43
+ assert_allclose(bin_thresholds, [1, 2, 3, 4, 5, 6, 7, 8, 9])
44
+
45
+ bin_thresholds = _find_binning_thresholds(data, max_bins=11)
46
+ assert_allclose(bin_thresholds, np.arange(10) + 0.5)
47
+
48
+ bin_thresholds = _find_binning_thresholds(data, max_bins=255)
49
+ assert_allclose(bin_thresholds, np.arange(10) + 0.5)
50
+
51
+
52
+ def test_find_binning_thresholds_random_data():
53
+ bin_thresholds = [
54
+ _find_binning_thresholds(DATA[:, i], max_bins=255) for i in range(2)
55
+ ]
56
+ for i in range(len(bin_thresholds)):
57
+ assert bin_thresholds[i].shape == (254,) # 255 - 1
58
+ assert bin_thresholds[i].dtype == DATA.dtype
59
+
60
+ assert_allclose(
61
+ bin_thresholds[0][[64, 128, 192]], np.array([-0.7, 0.0, 0.7]), atol=1e-1
62
+ )
63
+
64
+ assert_allclose(
65
+ bin_thresholds[1][[64, 128, 192]], np.array([9.99, 10.00, 10.01]), atol=1e-2
66
+ )
67
+
68
+
69
+ def test_find_binning_thresholds_low_n_bins():
70
+ bin_thresholds = [
71
+ _find_binning_thresholds(DATA[:, i], max_bins=128) for i in range(2)
72
+ ]
73
+ for i in range(len(bin_thresholds)):
74
+ assert bin_thresholds[i].shape == (127,) # 128 - 1
75
+ assert bin_thresholds[i].dtype == DATA.dtype
76
+
77
+
78
+ @pytest.mark.parametrize("n_bins", (2, 257))
79
+ def test_invalid_n_bins(n_bins):
80
+ err_msg = "n_bins={} should be no smaller than 3 and no larger than 256".format(
81
+ n_bins
82
+ )
83
+ with pytest.raises(ValueError, match=err_msg):
84
+ _BinMapper(n_bins=n_bins).fit(DATA)
85
+
86
+
87
+ def test_bin_mapper_n_features_transform():
88
+ mapper = _BinMapper(n_bins=42, random_state=42).fit(DATA)
89
+ err_msg = "This estimator was fitted with 2 features but 4 got passed"
90
+ with pytest.raises(ValueError, match=err_msg):
91
+ mapper.transform(np.repeat(DATA, 2, axis=1))
92
+
93
+
94
+ @pytest.mark.parametrize("max_bins", [16, 128, 255])
95
+ def test_map_to_bins(max_bins):
96
+ bin_thresholds = [
97
+ _find_binning_thresholds(DATA[:, i], max_bins=max_bins) for i in range(2)
98
+ ]
99
+ binned = np.zeros_like(DATA, dtype=X_BINNED_DTYPE, order="F")
100
+ is_categorical = np.zeros(2, dtype=np.uint8)
101
+ last_bin_idx = max_bins
102
+ _map_to_bins(DATA, bin_thresholds, is_categorical, last_bin_idx, n_threads, binned)
103
+ assert binned.shape == DATA.shape
104
+ assert binned.dtype == np.uint8
105
+ assert binned.flags.f_contiguous
106
+
107
+ min_indices = DATA.argmin(axis=0)
108
+ max_indices = DATA.argmax(axis=0)
109
+
110
+ for feature_idx, min_idx in enumerate(min_indices):
111
+ assert binned[min_idx, feature_idx] == 0
112
+ for feature_idx, max_idx in enumerate(max_indices):
113
+ assert binned[max_idx, feature_idx] == max_bins - 1
114
+
115
+
116
+ @pytest.mark.parametrize("max_bins", [5, 10, 42])
117
+ def test_bin_mapper_random_data(max_bins):
118
+ n_samples, n_features = DATA.shape
119
+
120
+ expected_count_per_bin = n_samples // max_bins
121
+ tol = int(0.05 * expected_count_per_bin)
122
+
123
+ # max_bins is the number of bins for non-missing values
124
+ n_bins = max_bins + 1
125
+ mapper = _BinMapper(n_bins=n_bins, random_state=42).fit(DATA)
126
+ binned = mapper.transform(DATA)
127
+
128
+ assert binned.shape == (n_samples, n_features)
129
+ assert binned.dtype == np.uint8
130
+ assert_array_equal(binned.min(axis=0), np.array([0, 0]))
131
+ assert_array_equal(binned.max(axis=0), np.array([max_bins - 1, max_bins - 1]))
132
+ assert len(mapper.bin_thresholds_) == n_features
133
+ for bin_thresholds_feature in mapper.bin_thresholds_:
134
+ assert bin_thresholds_feature.shape == (max_bins - 1,)
135
+ assert bin_thresholds_feature.dtype == DATA.dtype
136
+ assert np.all(mapper.n_bins_non_missing_ == max_bins)
137
+
138
+ # Check that the binned data is approximately balanced across bins.
139
+ for feature_idx in range(n_features):
140
+ for bin_idx in range(max_bins):
141
+ count = (binned[:, feature_idx] == bin_idx).sum()
142
+ assert abs(count - expected_count_per_bin) < tol
143
+
144
+
145
+ @pytest.mark.parametrize("n_samples, max_bins", [(5, 5), (5, 10), (5, 11), (42, 255)])
146
+ def test_bin_mapper_small_random_data(n_samples, max_bins):
147
+ data = np.random.RandomState(42).normal(size=n_samples).reshape(-1, 1)
148
+ assert len(np.unique(data)) == n_samples
149
+
150
+ # max_bins is the number of bins for non-missing values
151
+ n_bins = max_bins + 1
152
+ mapper = _BinMapper(n_bins=n_bins, random_state=42)
153
+ binned = mapper.fit_transform(data)
154
+
155
+ assert binned.shape == data.shape
156
+ assert binned.dtype == np.uint8
157
+ assert_array_equal(binned.ravel()[np.argsort(data.ravel())], np.arange(n_samples))
158
+
159
+
160
+ @pytest.mark.parametrize(
161
+ "max_bins, n_distinct, multiplier",
162
+ [
163
+ (5, 5, 1),
164
+ (5, 5, 3),
165
+ (255, 12, 42),
166
+ ],
167
+ )
168
+ def test_bin_mapper_identity_repeated_values(max_bins, n_distinct, multiplier):
169
+ data = np.array(list(range(n_distinct)) * multiplier).reshape(-1, 1)
170
+ # max_bins is the number of bins for non-missing values
171
+ n_bins = max_bins + 1
172
+ binned = _BinMapper(n_bins=n_bins).fit_transform(data)
173
+ assert_array_equal(data, binned)
174
+
175
+
176
+ @pytest.mark.parametrize("n_distinct", [2, 7, 42])
177
+ def test_bin_mapper_repeated_values_invariance(n_distinct):
178
+ rng = np.random.RandomState(42)
179
+ distinct_values = rng.normal(size=n_distinct)
180
+ assert len(np.unique(distinct_values)) == n_distinct
181
+
182
+ repeated_indices = rng.randint(low=0, high=n_distinct, size=1000)
183
+ data = distinct_values[repeated_indices]
184
+ rng.shuffle(data)
185
+ assert_array_equal(np.unique(data), np.sort(distinct_values))
186
+
187
+ data = data.reshape(-1, 1)
188
+
189
+ mapper_1 = _BinMapper(n_bins=n_distinct + 1)
190
+ binned_1 = mapper_1.fit_transform(data)
191
+ assert_array_equal(np.unique(binned_1[:, 0]), np.arange(n_distinct))
192
+
193
+ # Adding more bins to the mapper yields the same results (same thresholds)
194
+ mapper_2 = _BinMapper(n_bins=min(256, n_distinct * 3) + 1)
195
+ binned_2 = mapper_2.fit_transform(data)
196
+
197
+ assert_allclose(mapper_1.bin_thresholds_[0], mapper_2.bin_thresholds_[0])
198
+ assert_array_equal(binned_1, binned_2)
199
+
200
+
201
+ @pytest.mark.parametrize(
202
+ "max_bins, scale, offset",
203
+ [
204
+ (3, 2, -1),
205
+ (42, 1, 0),
206
+ (255, 0.3, 42),
207
+ ],
208
+ )
209
+ def test_bin_mapper_identity_small(max_bins, scale, offset):
210
+ data = np.arange(max_bins).reshape(-1, 1) * scale + offset
211
+ # max_bins is the number of bins for non-missing values
212
+ n_bins = max_bins + 1
213
+ binned = _BinMapper(n_bins=n_bins).fit_transform(data)
214
+ assert_array_equal(binned, np.arange(max_bins).reshape(-1, 1))
215
+
216
+
217
+ @pytest.mark.parametrize(
218
+ "max_bins_small, max_bins_large",
219
+ [
220
+ (2, 2),
221
+ (3, 3),
222
+ (4, 4),
223
+ (42, 42),
224
+ (255, 255),
225
+ (5, 17),
226
+ (42, 255),
227
+ ],
228
+ )
229
+ def test_bin_mapper_idempotence(max_bins_small, max_bins_large):
230
+ assert max_bins_large >= max_bins_small
231
+ data = np.random.RandomState(42).normal(size=30000).reshape(-1, 1)
232
+ mapper_small = _BinMapper(n_bins=max_bins_small + 1)
233
+ mapper_large = _BinMapper(n_bins=max_bins_small + 1)
234
+ binned_small = mapper_small.fit_transform(data)
235
+ binned_large = mapper_large.fit_transform(binned_small)
236
+ assert_array_equal(binned_small, binned_large)
237
+
238
+
239
+ @pytest.mark.parametrize("n_bins", [10, 100, 256])
240
+ @pytest.mark.parametrize("diff", [-5, 0, 5])
241
+ def test_n_bins_non_missing(n_bins, diff):
242
+ # Check that n_bins_non_missing is n_unique_values when
243
+ # there are not a lot of unique values, else n_bins - 1.
244
+
245
+ n_unique_values = n_bins + diff
246
+ X = list(range(n_unique_values)) * 2
247
+ X = np.array(X).reshape(-1, 1)
248
+ mapper = _BinMapper(n_bins=n_bins).fit(X)
249
+ assert np.all(mapper.n_bins_non_missing_ == min(n_bins - 1, n_unique_values))
250
+
251
+
252
+ def test_subsample():
253
+ # Make sure bin thresholds are different when applying subsampling
254
+ mapper_no_subsample = _BinMapper(subsample=None, random_state=0).fit(DATA)
255
+ mapper_subsample = _BinMapper(subsample=256, random_state=0).fit(DATA)
256
+
257
+ for feature in range(DATA.shape[1]):
258
+ assert not np.allclose(
259
+ mapper_no_subsample.bin_thresholds_[feature],
260
+ mapper_subsample.bin_thresholds_[feature],
261
+ rtol=1e-4,
262
+ )
263
+
264
+
265
+ @pytest.mark.parametrize(
266
+ "n_bins, n_bins_non_missing, X_trans_expected",
267
+ [
268
+ (
269
+ 256,
270
+ [4, 2, 2],
271
+ [
272
+ [0, 0, 0], # 255 <=> missing value
273
+ [255, 255, 0],
274
+ [1, 0, 0],
275
+ [255, 1, 1],
276
+ [2, 1, 1],
277
+ [3, 0, 0],
278
+ ],
279
+ ),
280
+ (
281
+ 3,
282
+ [2, 2, 2],
283
+ [
284
+ [0, 0, 0], # 2 <=> missing value
285
+ [2, 2, 0],
286
+ [0, 0, 0],
287
+ [2, 1, 1],
288
+ [1, 1, 1],
289
+ [1, 0, 0],
290
+ ],
291
+ ),
292
+ ],
293
+ )
294
+ def test_missing_values_support(n_bins, n_bins_non_missing, X_trans_expected):
295
+ # check for missing values: make sure nans are mapped to the last bin
296
+ # and that the _BinMapper attributes are correct
297
+
298
+ X = [
299
+ [1, 1, 0],
300
+ [np.nan, np.nan, 0],
301
+ [2, 1, 0],
302
+ [np.nan, 2, 1],
303
+ [3, 2, 1],
304
+ [4, 1, 0],
305
+ ]
306
+
307
+ X = np.array(X)
308
+
309
+ mapper = _BinMapper(n_bins=n_bins)
310
+ mapper.fit(X)
311
+
312
+ assert_array_equal(mapper.n_bins_non_missing_, n_bins_non_missing)
313
+
314
+ for feature_idx in range(X.shape[1]):
315
+ assert (
316
+ len(mapper.bin_thresholds_[feature_idx])
317
+ == n_bins_non_missing[feature_idx] - 1
318
+ )
319
+
320
+ assert mapper.missing_values_bin_idx_ == n_bins - 1
321
+
322
+ X_trans = mapper.transform(X)
323
+ assert_array_equal(X_trans, X_trans_expected)
324
+
325
+
326
+ def test_infinite_values():
327
+ # Make sure infinite values are properly handled.
328
+ bin_mapper = _BinMapper()
329
+
330
+ X = np.array([-np.inf, 0, 1, np.inf]).reshape(-1, 1)
331
+
332
+ bin_mapper.fit(X)
333
+ assert_allclose(bin_mapper.bin_thresholds_[0], [-np.inf, 0.5, ALMOST_INF])
334
+ assert bin_mapper.n_bins_non_missing_ == [4]
335
+
336
+ expected_binned_X = np.array([0, 1, 2, 3]).reshape(-1, 1)
337
+ assert_array_equal(bin_mapper.transform(X), expected_binned_X)
338
+
339
+
340
+ @pytest.mark.parametrize("n_bins", [15, 256])
341
+ def test_categorical_feature(n_bins):
342
+ # Basic test for categorical features
343
+ # we make sure that categories are mapped into [0, n_categories - 1] and
344
+ # that nans are mapped to the last bin
345
+ X = np.array(
346
+ [[4] * 500 + [1] * 3 + [10] * 4 + [0] * 4 + [13] + [7] * 5 + [np.nan] * 2],
347
+ dtype=X_DTYPE,
348
+ ).T
349
+ known_categories = [np.unique(X[~np.isnan(X)])]
350
+
351
+ bin_mapper = _BinMapper(
352
+ n_bins=n_bins,
353
+ is_categorical=np.array([True]),
354
+ known_categories=known_categories,
355
+ ).fit(X)
356
+ assert bin_mapper.n_bins_non_missing_ == [6]
357
+ assert_array_equal(bin_mapper.bin_thresholds_[0], [0, 1, 4, 7, 10, 13])
358
+
359
+ X = np.array([[0, 1, 4, np.nan, 7, 10, 13]], dtype=X_DTYPE).T
360
+ expected_trans = np.array([[0, 1, 2, n_bins - 1, 3, 4, 5]]).T
361
+ assert_array_equal(bin_mapper.transform(X), expected_trans)
362
+
363
+ # Negative categories are mapped to the missing values' bin
364
+ # (i.e. the bin of index `missing_values_bin_idx_`, which is n_bins - 1).
365
+ # Unknown positive categories do not happen in practice and are tested
366
+ # for illustration purposes.
367
+ X = np.array([[-4, -1, 100]], dtype=X_DTYPE).T
368
+ expected_trans = np.array([[n_bins - 1, n_bins - 1, 6]]).T
369
+ assert_array_equal(bin_mapper.transform(X), expected_trans)
370
+
371
+
372
+ def test_categorical_feature_negative_missing():
373
+ """Make sure bin mapper treats negative categories as missing values."""
374
+ X = np.array(
375
+ [[4] * 500 + [1] * 3 + [5] * 10 + [-1] * 3 + [np.nan] * 4], dtype=X_DTYPE
376
+ ).T
377
+ bin_mapper = _BinMapper(
378
+ n_bins=4,
379
+ is_categorical=np.array([True]),
380
+ known_categories=[np.array([1, 4, 5], dtype=X_DTYPE)],
381
+ ).fit(X)
382
+
383
+ assert bin_mapper.n_bins_non_missing_ == [3]
384
+
385
+ X = np.array([[-1, 1, 3, 5, np.nan]], dtype=X_DTYPE).T
386
+
387
+ # Negative values for categorical features are considered as missing values.
388
+ # They are mapped to the bin of index `bin_mapper.missing_values_bin_idx_`,
389
+ # which is 3 here.
390
+ assert bin_mapper.missing_values_bin_idx_ == 3
391
+ expected_trans = np.array([[3, 0, 1, 2, 3]]).T
392
+ assert_array_equal(bin_mapper.transform(X), expected_trans)
393
+
394
+
395
+ @pytest.mark.parametrize("n_bins", (128, 256))
396
+ def test_categorical_with_numerical_features(n_bins):
397
+ # basic check for binmapper with mixed data
398
+ X1 = np.arange(10, 20).reshape(-1, 1) # numerical
399
+ X2 = np.arange(10, 15).reshape(-1, 1) # categorical
400
+ X2 = np.r_[X2, X2]
401
+ X = np.c_[X1, X2]
402
+ known_categories = [None, np.unique(X2).astype(X_DTYPE)]
403
+
404
+ bin_mapper = _BinMapper(
405
+ n_bins=n_bins,
406
+ is_categorical=np.array([False, True]),
407
+ known_categories=known_categories,
408
+ ).fit(X)
409
+
410
+ assert_array_equal(bin_mapper.n_bins_non_missing_, [10, 5])
411
+
412
+ bin_thresholds = bin_mapper.bin_thresholds_
413
+ assert len(bin_thresholds) == 2
414
+ assert_array_equal(bin_thresholds[1], np.arange(10, 15))
415
+
416
+ expected_X_trans = [
417
+ [0, 0],
418
+ [1, 1],
419
+ [2, 2],
420
+ [3, 3],
421
+ [4, 4],
422
+ [5, 0],
423
+ [6, 1],
424
+ [7, 2],
425
+ [8, 3],
426
+ [9, 4],
427
+ ]
428
+ assert_array_equal(bin_mapper.transform(X), expected_X_trans)
429
+
430
+
431
+ def test_make_known_categories_bitsets():
432
+ # Check the output of make_known_categories_bitsets
433
+ X = np.array(
434
+ [[14, 2, 30], [30, 4, 70], [40, 10, 180], [40, 240, 180]], dtype=X_DTYPE
435
+ )
436
+
437
+ bin_mapper = _BinMapper(
438
+ n_bins=256,
439
+ is_categorical=np.array([False, True, True]),
440
+ known_categories=[None, X[:, 1], X[:, 2]],
441
+ )
442
+ bin_mapper.fit(X)
443
+
444
+ known_cat_bitsets, f_idx_map = bin_mapper.make_known_categories_bitsets()
445
+
446
+ # Note that for non-categorical features, values are left to 0
447
+ expected_f_idx_map = np.array([0, 0, 1], dtype=np.uint8)
448
+ assert_allclose(expected_f_idx_map, f_idx_map)
449
+
450
+ expected_cat_bitset = np.zeros((2, 8), dtype=np.uint32)
451
+
452
+ # first categorical feature: [2, 4, 10, 240]
453
+ f_idx = 1
454
+ mapped_f_idx = f_idx_map[f_idx]
455
+ expected_cat_bitset[mapped_f_idx, 0] = 2**2 + 2**4 + 2**10
456
+ # 240 = 7 * 32 + 16, therefore bit 16 of the 7th 32-bit word is set.
457
+ expected_cat_bitset[mapped_f_idx, 7] = 2**16
458
+
459
+ # second categorical feature [30, 70, 180]
460
+ f_idx = 2
461
+ mapped_f_idx = f_idx_map[f_idx]
462
+ expected_cat_bitset[mapped_f_idx, 0] = 2**30
463
+ expected_cat_bitset[mapped_f_idx, 2] = 2**6
464
+ expected_cat_bitset[mapped_f_idx, 5] = 2**20
465
+
466
+ assert_allclose(expected_cat_bitset, known_cat_bitsets)
467
+
468
+
469
+ @pytest.mark.parametrize(
470
+ "is_categorical, known_categories, match",
471
+ [
472
+ (np.array([True]), [None], "Known categories for feature 0 must be provided"),
473
+ (
474
+ np.array([False]),
475
+ np.array([1, 2, 3]),
476
+ "isn't marked as a categorical feature, but categories were passed",
477
+ ),
478
+ ],
479
+ )
480
+ def test_categorical_parameters(is_categorical, known_categories, match):
481
+ # test the validation of the is_categorical and known_categories parameters
482
+
483
+ X = np.array([[1, 2, 3]], dtype=X_DTYPE)
484
+
485
+ bin_mapper = _BinMapper(
486
+ is_categorical=is_categorical, known_categories=known_categories
487
+ )
488
+ with pytest.raises(ValueError, match=match):
489
+ bin_mapper.fit(X)
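The tests above rely on two conventions: n_bins counts one extra bin reserved for missing values (n_bins = max_bins + 1), and NaNs are mapped to that last bin. A short usage sketch of the private _BinMapper API exercised here (private, so subject to change; expected outputs are indicative):

import numpy as np
from sklearn.ensemble._hist_gradient_boosting.binning import _BinMapper

X = np.array([[1.0], [2.0], [3.0], [np.nan]])
mapper = _BinMapper(n_bins=4)  # 3 bins for non-missing values + 1 bin for NaN
X_binned = mapper.fit_transform(X)

print(X_binned.ravel())                # [0 1 2 3]: NaN falls in the last bin
print(mapper.n_bins_non_missing_)      # [3]
print(mapper.missing_values_bin_idx_)  # 3, i.e. n_bins - 1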
llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/test_bitset.py ADDED
@@ -0,0 +1,64 @@
1
+ import numpy as np
2
+ import pytest
3
+ from numpy.testing import assert_allclose
4
+
5
+ from sklearn.ensemble._hist_gradient_boosting._bitset import (
6
+ in_bitset_memoryview,
7
+ set_bitset_memoryview,
8
+ set_raw_bitset_from_binned_bitset,
9
+ )
10
+ from sklearn.ensemble._hist_gradient_boosting.common import X_DTYPE
11
+
12
+
13
+ @pytest.mark.parametrize(
14
+ "values_to_insert, expected_bitset",
15
+ [
16
+ ([0, 4, 33], np.array([2**0 + 2**4, 2**1, 0], dtype=np.uint32)),
17
+ (
18
+ [31, 32, 33, 79],
19
+ np.array([2**31, 2**0 + 2**1, 2**15], dtype=np.uint32),
20
+ ),
21
+ ],
22
+ )
23
+ def test_set_get_bitset(values_to_insert, expected_bitset):
24
+ n_32bits_ints = 3
25
+ bitset = np.zeros(n_32bits_ints, dtype=np.uint32)
26
+ for value in values_to_insert:
27
+ set_bitset_memoryview(bitset, value)
28
+ assert_allclose(expected_bitset, bitset)
29
+ for value in range(32 * n_32bits_ints):
30
+ if value in values_to_insert:
31
+ assert in_bitset_memoryview(bitset, value)
32
+ else:
33
+ assert not in_bitset_memoryview(bitset, value)
34
+
35
+
36
+ @pytest.mark.parametrize(
37
+ "raw_categories, binned_cat_to_insert, expected_raw_bitset",
38
+ [
39
+ (
40
+ [3, 4, 5, 10, 31, 32, 43],
41
+ [0, 2, 4, 5, 6],
42
+ [2**3 + 2**5 + 2**31, 2**0 + 2**11],
43
+ ),
44
+ ([3, 33, 50, 52], [1, 3], [0, 2**1 + 2**20]),
45
+ ],
46
+ )
47
+ def test_raw_bitset_from_binned_bitset(
48
+ raw_categories, binned_cat_to_insert, expected_raw_bitset
49
+ ):
50
+ binned_bitset = np.zeros(2, dtype=np.uint32)
51
+ raw_bitset = np.zeros(2, dtype=np.uint32)
52
+ raw_categories = np.asarray(raw_categories, dtype=X_DTYPE)
53
+
54
+ for val in binned_cat_to_insert:
55
+ set_bitset_memoryview(binned_bitset, val)
56
+
57
+ set_raw_bitset_from_binned_bitset(raw_bitset, binned_bitset, raw_categories)
58
+
59
+ assert_allclose(expected_raw_bitset, raw_bitset)
60
+ for binned_cat_val, raw_cat_val in enumerate(raw_categories):
61
+ if binned_cat_val in binned_cat_to_insert:
62
+ assert in_bitset_memoryview(raw_bitset, raw_cat_val)
63
+ else:
64
+ assert not in_bitset_memoryview(raw_bitset, raw_cat_val)
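The bitset helpers exercised above store category values in 32-bit words: value v sets bit v % 32 of word v // 32. A pure-Python sketch of that encoding (the real helpers are the Cython set_bitset_memoryview / in_bitset_memoryview; the function names below are only illustrative):

import numpy as np

def set_bit(bitset, value):
    # value // 32 selects the 32-bit word, value % 32 the bit inside it.
    bitset[value // 32] |= np.uint32(1) << np.uint32(value % 32)

def in_bitset(bitset, value):
    return bool((bitset[value // 32] >> np.uint32(value % 32)) & np.uint32(1))

bitset = np.zeros(8, dtype=np.uint32)
for v in (0, 4, 33, 240):
    set_bit(bitset, v)

assert bitset[0] == 2**0 + 2**4
assert bitset[1] == 2**1   # 33 = 1 * 32 + 1
assert bitset[7] == 2**16  # 240 = 7 * 32 + 16
assert in_bitset(bitset, 240) and not in_bitset(bitset, 5)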
llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/test_compare_lightgbm.py ADDED
@@ -0,0 +1,279 @@
1
+ import numpy as np
2
+ import pytest
3
+
4
+ from sklearn.datasets import make_classification, make_regression
5
+ from sklearn.ensemble import (
6
+ HistGradientBoostingClassifier,
7
+ HistGradientBoostingRegressor,
8
+ )
9
+ from sklearn.ensemble._hist_gradient_boosting.binning import _BinMapper
10
+ from sklearn.ensemble._hist_gradient_boosting.utils import get_equivalent_estimator
11
+ from sklearn.metrics import accuracy_score
12
+ from sklearn.model_selection import train_test_split
13
+
14
+
15
+ @pytest.mark.parametrize("seed", range(5))
16
+ @pytest.mark.parametrize(
17
+ "loss",
18
+ [
19
+ "squared_error",
20
+ "poisson",
21
+ pytest.param(
22
+ "gamma",
23
+ marks=pytest.mark.skip("LightGBM with gamma loss has larger deviation."),
24
+ ),
25
+ ],
26
+ )
27
+ @pytest.mark.parametrize("min_samples_leaf", (1, 20))
28
+ @pytest.mark.parametrize(
29
+ "n_samples, max_leaf_nodes",
30
+ [
31
+ (255, 4096),
32
+ (1000, 8),
33
+ ],
34
+ )
35
+ def test_same_predictions_regression(
36
+ seed, loss, min_samples_leaf, n_samples, max_leaf_nodes
37
+ ):
38
+ # Make sure sklearn has the same predictions as lightgbm for easy targets.
39
+ #
40
+ # In particular when the size of the trees are bound and the number of
41
+ # samples is large enough, the structure of the prediction trees found by
42
+ # LightGBM and sklearn should be exactly identical.
43
+ #
44
+ # Notes:
45
+ # - Several candidate splits may have equal gains when the number of
46
+ # samples in a node is low (and because of float errors). Therefore the
47
+ # predictions on the test set might differ if the structure of the tree
48
+ # is not exactly the same. To avoid this issue we only compare the
49
+ # predictions on the test set when the number of samples is large enough
50
+ # and max_leaf_nodes is low enough.
51
+ # - To ignore discrepancies caused by small differences in the binning
52
+ # strategy, data is pre-binned if n_samples > 255.
53
+ # - We don't check the absolute_error loss here. This is because
54
+ # LightGBM's computation of the median (used for the initial value of
55
+ # raw_prediction) is a bit off (they'll e.g. return midpoints when there
56
+ # is no need to.). Since these tests only run 1 iteration, the
57
+ # discrepancy between the initial values leads to biggish differences in
58
+ # the predictions. These differences are much smaller with more
59
+ # iterations.
60
+ pytest.importorskip("lightgbm")
61
+
62
+ rng = np.random.RandomState(seed=seed)
63
+ max_iter = 1
64
+ max_bins = 255
65
+
66
+ X, y = make_regression(
67
+ n_samples=n_samples, n_features=5, n_informative=5, random_state=0
68
+ )
69
+
70
+ if loss in ("gamma", "poisson"):
71
+ # make the target positive
72
+ y = np.abs(y) + np.mean(np.abs(y))
73
+
74
+ if n_samples > 255:
75
+ # bin data and convert it to float32 so that the estimator doesn't
76
+ # treat it as pre-binned
77
+ X = _BinMapper(n_bins=max_bins + 1).fit_transform(X).astype(np.float32)
78
+
79
+ X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=rng)
80
+
81
+ est_sklearn = HistGradientBoostingRegressor(
82
+ loss=loss,
83
+ max_iter=max_iter,
84
+ max_bins=max_bins,
85
+ learning_rate=1,
86
+ early_stopping=False,
87
+ min_samples_leaf=min_samples_leaf,
88
+ max_leaf_nodes=max_leaf_nodes,
89
+ )
90
+ est_lightgbm = get_equivalent_estimator(est_sklearn, lib="lightgbm")
91
+ est_lightgbm.set_params(min_sum_hessian_in_leaf=0)
92
+
93
+ est_lightgbm.fit(X_train, y_train)
94
+ est_sklearn.fit(X_train, y_train)
95
+
96
+ # We need X to be treated as numerical data, not pre-binned data.
97
+ X_train, X_test = X_train.astype(np.float32), X_test.astype(np.float32)
98
+
99
+ pred_lightgbm = est_lightgbm.predict(X_train)
100
+ pred_sklearn = est_sklearn.predict(X_train)
101
+ if loss in ("gamma", "poisson"):
102
+ # More than 65% of the predictions must be close up to the 2nd decimal.
103
+ # TODO: We are not entirely satisfied with this lax comparison, but the root
104
+ # cause is not clear, maybe algorithmic differences. One such example is the
105
+ # poisson_max_delta_step parameter of LightGBM which does not exist in HGBT.
106
+ assert (
107
+ np.mean(np.isclose(pred_lightgbm, pred_sklearn, rtol=1e-2, atol=1e-2))
108
+ > 0.65
109
+ )
110
+ else:
111
+ # Less than 1% of the predictions may deviate more than 1e-3 in relative terms.
112
+ assert np.mean(np.isclose(pred_lightgbm, pred_sklearn, rtol=1e-3)) > 1 - 0.01
113
+
114
+ if max_leaf_nodes < 10 and n_samples >= 1000 and loss in ("squared_error",):
115
+ pred_lightgbm = est_lightgbm.predict(X_test)
116
+ pred_sklearn = est_sklearn.predict(X_test)
117
+ # Less than 1% of the predictions may deviate more than 1e-4 in relative terms.
118
+ assert np.mean(np.isclose(pred_lightgbm, pred_sklearn, rtol=1e-4)) > 1 - 0.01
119
+
120
+
121
+ @pytest.mark.parametrize("seed", range(5))
122
+ @pytest.mark.parametrize("min_samples_leaf", (1, 20))
123
+ @pytest.mark.parametrize(
124
+ "n_samples, max_leaf_nodes",
125
+ [
126
+ (255, 4096),
127
+ (1000, 8),
128
+ ],
129
+ )
130
+ def test_same_predictions_classification(
131
+ seed, min_samples_leaf, n_samples, max_leaf_nodes
132
+ ):
133
+ # Same as test_same_predictions_regression but for classification
134
+ pytest.importorskip("lightgbm")
135
+
136
+ rng = np.random.RandomState(seed=seed)
137
+ max_iter = 1
138
+ n_classes = 2
139
+ max_bins = 255
140
+
141
+ X, y = make_classification(
142
+ n_samples=n_samples,
143
+ n_classes=n_classes,
144
+ n_features=5,
145
+ n_informative=5,
146
+ n_redundant=0,
147
+ random_state=0,
148
+ )
149
+
150
+ if n_samples > 255:
151
+ # bin data and convert it to float32 so that the estimator doesn't
152
+ # treat it as pre-binned
153
+ X = _BinMapper(n_bins=max_bins + 1).fit_transform(X).astype(np.float32)
154
+
155
+ X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=rng)
156
+
157
+ est_sklearn = HistGradientBoostingClassifier(
158
+ loss="log_loss",
159
+ max_iter=max_iter,
160
+ max_bins=max_bins,
161
+ learning_rate=1,
162
+ early_stopping=False,
163
+ min_samples_leaf=min_samples_leaf,
164
+ max_leaf_nodes=max_leaf_nodes,
165
+ )
166
+ est_lightgbm = get_equivalent_estimator(
167
+ est_sklearn, lib="lightgbm", n_classes=n_classes
168
+ )
169
+
170
+ est_lightgbm.fit(X_train, y_train)
171
+ est_sklearn.fit(X_train, y_train)
172
+
173
+ # We need X to be treated as numerical data, not pre-binned data.
174
+ X_train, X_test = X_train.astype(np.float32), X_test.astype(np.float32)
175
+
176
+ pred_lightgbm = est_lightgbm.predict(X_train)
177
+ pred_sklearn = est_sklearn.predict(X_train)
178
+ assert np.mean(pred_sklearn == pred_lightgbm) > 0.89
179
+
180
+ acc_lightgbm = accuracy_score(y_train, pred_lightgbm)
181
+ acc_sklearn = accuracy_score(y_train, pred_sklearn)
182
+ np.testing.assert_almost_equal(acc_lightgbm, acc_sklearn)
183
+
184
+ if max_leaf_nodes < 10 and n_samples >= 1000:
185
+ pred_lightgbm = est_lightgbm.predict(X_test)
186
+ pred_sklearn = est_sklearn.predict(X_test)
187
+ assert np.mean(pred_sklearn == pred_lightgbm) > 0.89
188
+
189
+ acc_lightgbm = accuracy_score(y_test, pred_lightgbm)
190
+ acc_sklearn = accuracy_score(y_test, pred_sklearn)
191
+ np.testing.assert_almost_equal(acc_lightgbm, acc_sklearn, decimal=2)
192
+
193
+
194
+ @pytest.mark.parametrize("seed", range(5))
195
+ @pytest.mark.parametrize("min_samples_leaf", (1, 20))
196
+ @pytest.mark.parametrize(
197
+ "n_samples, max_leaf_nodes",
198
+ [
199
+ (255, 4096),
200
+ (10000, 8),
201
+ ],
202
+ )
203
+ def test_same_predictions_multiclass_classification(
204
+ seed, min_samples_leaf, n_samples, max_leaf_nodes
205
+ ):
206
+ # Same as test_same_predictions_regression but for classification
207
+ pytest.importorskip("lightgbm")
208
+
209
+ rng = np.random.RandomState(seed=seed)
210
+ n_classes = 3
211
+ max_iter = 1
212
+ max_bins = 255
213
+ lr = 1
214
+
215
+ X, y = make_classification(
216
+ n_samples=n_samples,
217
+ n_classes=n_classes,
218
+ n_features=5,
219
+ n_informative=5,
220
+ n_redundant=0,
221
+ n_clusters_per_class=1,
222
+ random_state=0,
223
+ )
224
+
225
+ if n_samples > 255:
226
+ # bin data and convert it to float32 so that the estimator doesn't
227
+ # treat it as pre-binned
228
+ X = _BinMapper(n_bins=max_bins + 1).fit_transform(X).astype(np.float32)
229
+
230
+ X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=rng)
231
+
232
+ est_sklearn = HistGradientBoostingClassifier(
233
+ loss="log_loss",
234
+ max_iter=max_iter,
235
+ max_bins=max_bins,
236
+ learning_rate=lr,
237
+ early_stopping=False,
238
+ min_samples_leaf=min_samples_leaf,
239
+ max_leaf_nodes=max_leaf_nodes,
240
+ )
241
+ est_lightgbm = get_equivalent_estimator(
242
+ est_sklearn, lib="lightgbm", n_classes=n_classes
243
+ )
244
+
245
+ est_lightgbm.fit(X_train, y_train)
246
+ est_sklearn.fit(X_train, y_train)
247
+
248
+ # We need X to be treated as numerical data, not pre-binned data.
249
+ X_train, X_test = X_train.astype(np.float32), X_test.astype(np.float32)
250
+
251
+ pred_lightgbm = est_lightgbm.predict(X_train)
252
+ pred_sklearn = est_sklearn.predict(X_train)
253
+ assert np.mean(pred_sklearn == pred_lightgbm) > 0.89
254
+
255
+ proba_lightgbm = est_lightgbm.predict_proba(X_train)
256
+ proba_sklearn = est_sklearn.predict_proba(X_train)
257
+ # assert more than 75% of the predicted probabilities are the same up to
258
+ # the second decimal
259
+ assert np.mean(np.abs(proba_lightgbm - proba_sklearn) < 1e-2) > 0.75
260
+
261
+ acc_lightgbm = accuracy_score(y_train, pred_lightgbm)
262
+ acc_sklearn = accuracy_score(y_train, pred_sklearn)
263
+
264
+ np.testing.assert_allclose(acc_lightgbm, acc_sklearn, rtol=0, atol=5e-2)
265
+
266
+ if max_leaf_nodes < 10 and n_samples >= 1000:
267
+ pred_lightgbm = est_lightgbm.predict(X_test)
268
+ pred_sklearn = est_sklearn.predict(X_test)
269
+ assert np.mean(pred_sklearn == pred_lightgbm) > 0.89
270
+
271
+ proba_lightgbm = est_lightgbm.predict_proba(X_train)
272
+ proba_sklearn = est_sklearn.predict_proba(X_train)
273
+ # assert more than 75% of the predicted probabilities are the same up
274
+ # to the second decimal
275
+ assert np.mean(np.abs(proba_lightgbm - proba_sklearn) < 1e-2) > 0.75
276
+
277
+ acc_lightgbm = accuracy_score(y_test, pred_lightgbm)
278
+ acc_sklearn = accuracy_score(y_test, pred_sklearn)
279
+ np.testing.assert_almost_equal(acc_lightgbm, acc_sklearn, decimal=2)
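The assertions above compare the two libraries through the fraction of per-sample predictions that agree within a tolerance rather than through exact equality. A hedged sketch of that comparison as a standalone helper (the name is hypothetical, not part of the test module):

import numpy as np

def agreement_fraction(pred_a, pred_b, rtol=1e-3, atol=0.0):
    """Fraction of samples whose two predictions are close within tolerance."""
    return np.mean(np.isclose(pred_a, pred_b, rtol=rtol, atol=atol))

# The regression test effectively requires that at most 1% of predictions
# deviate by more than 0.1% in relative terms:
# assert agreement_fraction(pred_lightgbm, pred_sklearn) > 1 - 0.01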
llmeval-env/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/test_gradient_boosting.py ADDED
@@ -0,0 +1,1683 @@
1
+ import copyreg
2
+ import io
3
+ import pickle
4
+ import re
5
+ import warnings
6
+ from unittest.mock import Mock
7
+
8
+ import joblib
9
+ import numpy as np
10
+ import pytest
11
+ from joblib.numpy_pickle import NumpyPickler
12
+ from numpy.testing import assert_allclose, assert_array_equal
13
+
14
+ import sklearn
15
+ from sklearn._loss.loss import (
16
+ AbsoluteError,
17
+ HalfBinomialLoss,
18
+ HalfSquaredError,
19
+ PinballLoss,
20
+ )
21
+ from sklearn.base import BaseEstimator, TransformerMixin, clone, is_regressor
22
+ from sklearn.compose import make_column_transformer
23
+ from sklearn.datasets import make_classification, make_low_rank_matrix, make_regression
24
+ from sklearn.dummy import DummyRegressor
25
+ from sklearn.ensemble import (
26
+ HistGradientBoostingClassifier,
27
+ HistGradientBoostingRegressor,
28
+ )
29
+ from sklearn.ensemble._hist_gradient_boosting.binning import _BinMapper
30
+ from sklearn.ensemble._hist_gradient_boosting.common import G_H_DTYPE
31
+ from sklearn.ensemble._hist_gradient_boosting.grower import TreeGrower
32
+ from sklearn.ensemble._hist_gradient_boosting.predictor import TreePredictor
33
+ from sklearn.exceptions import NotFittedError
34
+ from sklearn.metrics import get_scorer, mean_gamma_deviance, mean_poisson_deviance
35
+ from sklearn.model_selection import cross_val_score, train_test_split
36
+ from sklearn.pipeline import make_pipeline
37
+ from sklearn.preprocessing import KBinsDiscretizer, MinMaxScaler, OneHotEncoder
38
+ from sklearn.utils import _IS_32BIT, shuffle
39
+ from sklearn.utils._openmp_helpers import _openmp_effective_n_threads
40
+ from sklearn.utils._testing import _convert_container
41
+
42
+ n_threads = _openmp_effective_n_threads()
43
+
44
+ X_classification, y_classification = make_classification(random_state=0)
45
+ X_regression, y_regression = make_regression(random_state=0)
46
+ X_multi_classification, y_multi_classification = make_classification(
47
+ n_classes=3, n_informative=3, random_state=0
48
+ )
49
+
50
+
51
+ def _make_dumb_dataset(n_samples):
52
+ """Make a dumb dataset to test early stopping."""
53
+ rng = np.random.RandomState(42)
54
+ X_dumb = rng.randn(n_samples, 1)
55
+ y_dumb = (X_dumb[:, 0] > 0).astype("int64")
56
+ return X_dumb, y_dumb
57
+
58
+
59
+ @pytest.mark.parametrize(
60
+ "GradientBoosting, X, y",
61
+ [
62
+ (HistGradientBoostingClassifier, X_classification, y_classification),
63
+ (HistGradientBoostingRegressor, X_regression, y_regression),
64
+ ],
65
+ )
66
+ @pytest.mark.parametrize(
67
+ "params, err_msg",
68
+ [
69
+ (
70
+ {"interaction_cst": [0, 1]},
71
+ "Interaction constraints must be a sequence of tuples or lists",
72
+ ),
73
+ (
74
+ {"interaction_cst": [{0, 9999}]},
75
+ r"Interaction constraints must consist of integer indices in \[0,"
76
+ r" n_features - 1\] = \[.*\], specifying the position of features,",
77
+ ),
78
+ (
79
+ {"interaction_cst": [{-1, 0}]},
80
+ r"Interaction constraints must consist of integer indices in \[0,"
81
+ r" n_features - 1\] = \[.*\], specifying the position of features,",
82
+ ),
83
+ (
84
+ {"interaction_cst": [{0.5}]},
85
+ r"Interaction constraints must consist of integer indices in \[0,"
86
+ r" n_features - 1\] = \[.*\], specifying the position of features,",
87
+ ),
88
+ ],
89
+ )
90
+ def test_init_parameters_validation(GradientBoosting, X, y, params, err_msg):
91
+ with pytest.raises(ValueError, match=err_msg):
92
+ GradientBoosting(**params).fit(X, y)
93
+
94
+
95
+ @pytest.mark.parametrize(
96
+ "scoring, validation_fraction, early_stopping, n_iter_no_change, tol",
97
+ [
98
+ ("neg_mean_squared_error", 0.1, True, 5, 1e-7), # use scorer
99
+ ("neg_mean_squared_error", None, True, 5, 1e-1), # use scorer on train
100
+ (None, 0.1, True, 5, 1e-7), # same with default scorer
101
+ (None, None, True, 5, 1e-1),
102
+ ("loss", 0.1, True, 5, 1e-7), # use loss
103
+ ("loss", None, True, 5, 1e-1), # use loss on training data
104
+ (None, None, False, 5, 0.0), # no early stopping
105
+ ],
106
+ )
107
+ def test_early_stopping_regression(
108
+ scoring, validation_fraction, early_stopping, n_iter_no_change, tol
109
+ ):
110
+ max_iter = 200
111
+
112
+ X, y = make_regression(n_samples=50, random_state=0)
113
+
114
+ gb = HistGradientBoostingRegressor(
115
+ verbose=1, # just for coverage
116
+ min_samples_leaf=5, # easier to overfit fast
117
+ scoring=scoring,
118
+ tol=tol,
119
+ early_stopping=early_stopping,
120
+ validation_fraction=validation_fraction,
121
+ max_iter=max_iter,
122
+ n_iter_no_change=n_iter_no_change,
123
+ random_state=0,
124
+ )
125
+ gb.fit(X, y)
126
+
127
+ if early_stopping:
128
+ assert n_iter_no_change <= gb.n_iter_ < max_iter
129
+ else:
130
+ assert gb.n_iter_ == max_iter
131
+
132
+
133
+ @pytest.mark.parametrize(
134
+ "data",
135
+ (
136
+ make_classification(n_samples=30, random_state=0),
137
+ make_classification(
138
+ n_samples=30, n_classes=3, n_clusters_per_class=1, random_state=0
139
+ ),
140
+ ),
141
+ )
142
+ @pytest.mark.parametrize(
143
+ "scoring, validation_fraction, early_stopping, n_iter_no_change, tol",
144
+ [
145
+ ("accuracy", 0.1, True, 5, 1e-7), # use scorer
146
+ ("accuracy", None, True, 5, 1e-1), # use scorer on training data
147
+ (None, 0.1, True, 5, 1e-7), # same with default scorer
148
+ (None, None, True, 5, 1e-1),
149
+ ("loss", 0.1, True, 5, 1e-7), # use loss
150
+ ("loss", None, True, 5, 1e-1), # use loss on training data
151
+ (None, None, False, 5, 0.0), # no early stopping
152
+ ],
153
+ )
154
+ def test_early_stopping_classification(
155
+ data, scoring, validation_fraction, early_stopping, n_iter_no_change, tol
156
+ ):
157
+ max_iter = 50
158
+
159
+ X, y = data
160
+
161
+ gb = HistGradientBoostingClassifier(
162
+ verbose=1, # just for coverage
163
+ min_samples_leaf=5, # easier to overfit fast
164
+ scoring=scoring,
165
+ tol=tol,
166
+ early_stopping=early_stopping,
167
+ validation_fraction=validation_fraction,
168
+ max_iter=max_iter,
169
+ n_iter_no_change=n_iter_no_change,
170
+ random_state=0,
171
+ )
172
+ gb.fit(X, y)
173
+
174
+ if early_stopping is True:
175
+ assert n_iter_no_change <= gb.n_iter_ < max_iter
176
+ else:
177
+ assert gb.n_iter_ == max_iter
178
+
179
+
180
+ @pytest.mark.parametrize(
181
+ "GradientBoosting, X, y",
182
+ [
183
+ (HistGradientBoostingClassifier, *_make_dumb_dataset(10000)),
184
+ (HistGradientBoostingClassifier, *_make_dumb_dataset(10001)),
185
+ (HistGradientBoostingRegressor, *_make_dumb_dataset(10000)),
186
+ (HistGradientBoostingRegressor, *_make_dumb_dataset(10001)),
187
+ ],
188
+ )
189
+ def test_early_stopping_default(GradientBoosting, X, y):
190
+ # Test that early stopping is enabled by default if and only if there
191
+ # are more than 10000 samples
192
+ gb = GradientBoosting(max_iter=10, n_iter_no_change=2, tol=1e-1)
193
+ gb.fit(X, y)
194
+ if X.shape[0] > 10000:
195
+ assert gb.n_iter_ < gb.max_iter
196
+ else:
197
+ assert gb.n_iter_ == gb.max_iter
198
+
199
+
200
+ @pytest.mark.parametrize(
201
+ "scores, n_iter_no_change, tol, stopping",
202
+ [
203
+ ([], 1, 0.001, False), # not enough iterations
204
+ ([1, 1, 1], 5, 0.001, False), # not enough iterations
205
+ ([1, 1, 1, 1, 1], 5, 0.001, False), # not enough iterations
206
+ ([1, 2, 3, 4, 5, 6], 5, 0.001, False), # significant improvement
207
+ ([1, 2, 3, 4, 5, 6], 5, 0.0, False), # significant improvement
208
+ ([1, 2, 3, 4, 5, 6], 5, 0.999, False), # significant improvement
209
+ ([1, 2, 3, 4, 5, 6], 5, 5 - 1e-5, False), # significant improvement
210
+ ([1] * 6, 5, 0.0, True), # no significant improvement
211
+ ([1] * 6, 5, 0.001, True), # no significant improvement
212
+ ([1] * 6, 5, 5, True), # no significant improvement
213
+ ],
214
+ )
215
+ def test_should_stop(scores, n_iter_no_change, tol, stopping):
216
+ gbdt = HistGradientBoostingClassifier(n_iter_no_change=n_iter_no_change, tol=tol)
217
+ assert gbdt._should_stop(scores) == stopping
218
+
219
+
220
+ def test_absolute_error():
221
+ # For coverage only.
222
+ X, y = make_regression(n_samples=500, random_state=0)
223
+ gbdt = HistGradientBoostingRegressor(loss="absolute_error", random_state=0)
224
+ gbdt.fit(X, y)
225
+ assert gbdt.score(X, y) > 0.9
226
+
227
+
228
+ def test_absolute_error_sample_weight():
229
+ # non regression test for issue #19400
230
+ # make sure no error is thrown during fit of
231
+ # HistGradientBoostingRegressor with absolute_error loss function
232
+ # and passing sample_weight
233
+ rng = np.random.RandomState(0)
234
+ n_samples = 100
235
+ X = rng.uniform(-1, 1, size=(n_samples, 2))
236
+ y = rng.uniform(-1, 1, size=n_samples)
237
+ sample_weight = rng.uniform(0, 1, size=n_samples)
238
+ gbdt = HistGradientBoostingRegressor(loss="absolute_error")
239
+ gbdt.fit(X, y, sample_weight=sample_weight)
240
+
241
+
242
+ @pytest.mark.parametrize("y", [([1.0, -2.0, 0.0]), ([0.0, 1.0, 2.0])])
243
+ def test_gamma_y_positive(y):
244
+ # Test that ValueError is raised if any y_i <= 0.
245
+ err_msg = r"loss='gamma' requires strictly positive y."
246
+ gbdt = HistGradientBoostingRegressor(loss="gamma", random_state=0)
247
+ with pytest.raises(ValueError, match=err_msg):
248
+ gbdt.fit(np.zeros(shape=(len(y), 1)), y)
249
+
250
+
251
+ def test_gamma():
252
+ # For a Gamma distributed target, we expect an HGBT trained with the Gamma deviance
253
+ # (loss) to give better results than an HGBT with any other loss function, measured
254
+ # in out-of-sample Gamma deviance as metric/score.
255
+ # Note that squared error could potentially predict negative values which is
256
+ # invalid (np.inf) for the Gamma deviance. A Poisson HGBT (having a log link)
257
+ # does not have that defect.
258
+ # Important note: It seems that a Poisson HGBT almost always has better
259
+ # out-of-sample performance than the Gamma HGBT, measured in Gamma deviance.
260
+ # LightGBM shows the same behaviour. Hence, we only compare to a squared error
261
+ # HGBT, but not to a Poisson deviance HGBT.
262
+ rng = np.random.RandomState(42)
263
+ n_train, n_test, n_features = 500, 100, 20
264
+ X = make_low_rank_matrix(
265
+ n_samples=n_train + n_test,
266
+ n_features=n_features,
267
+ random_state=rng,
268
+ )
269
+ # We create a log-linear Gamma model. This gives y.min ~ 1e-2, y.max ~ 1e2
270
+ coef = rng.uniform(low=-10, high=20, size=n_features)
271
+ # Numpy parametrizes gamma(shape=k, scale=theta) with mean = k * theta and
272
+ # variance = k * theta^2. We parametrize it instead with mean = exp(X @ coef)
273
+ # and variance = dispersion * mean^2 by setting k = 1 / dispersion,
274
+ # theta = dispersion * mean.
275
+ dispersion = 0.5
276
+ y = rng.gamma(shape=1 / dispersion, scale=dispersion * np.exp(X @ coef))
277
+ X_train, X_test, y_train, y_test = train_test_split(
278
+ X, y, test_size=n_test, random_state=rng
279
+ )
280
+ gbdt_gamma = HistGradientBoostingRegressor(loss="gamma", random_state=123)
281
+ gbdt_mse = HistGradientBoostingRegressor(loss="squared_error", random_state=123)
282
+ dummy = DummyRegressor(strategy="mean")
283
+ for model in (gbdt_gamma, gbdt_mse, dummy):
284
+ model.fit(X_train, y_train)
285
+
286
+ for X, y in [(X_train, y_train), (X_test, y_test)]:
287
+ loss_gbdt_gamma = mean_gamma_deviance(y, gbdt_gamma.predict(X))
288
+ # We restrict the squared error HGBT to predict at least the minimum seen y at
289
+ # train time to make it strictly positive.
290
+ loss_gbdt_mse = mean_gamma_deviance(
291
+ y, np.maximum(np.min(y_train), gbdt_mse.predict(X))
292
+ )
293
+ loss_dummy = mean_gamma_deviance(y, dummy.predict(X))
294
+ assert loss_gbdt_gamma < loss_dummy
295
+ assert loss_gbdt_gamma < loss_gbdt_mse
296
+
297
+
298
+ @pytest.mark.parametrize("quantile", [0.2, 0.5, 0.8])
299
+ def test_quantile_asymmetric_error(quantile):
300
+ """Test quantile regression for asymmetric distributed targets."""
301
+ n_samples = 10_000
302
+ rng = np.random.RandomState(42)
303
+ # take care that X @ coef + intercept > 0
304
+ X = np.concatenate(
305
+ (
306
+ np.abs(rng.randn(n_samples)[:, None]),
307
+ -rng.randint(2, size=(n_samples, 1)),
308
+ ),
309
+ axis=1,
310
+ )
311
+ intercept = 1.23
312
+ coef = np.array([0.5, -2])
313
+ # For an exponential distribution with rate lambda, e.g. exp(-lambda * x),
314
+ # the quantile at level q is:
315
+ # quantile(q) = - log(1 - q) / lambda
316
+ # scale = 1/lambda = -quantile(q) / log(1-q)
317
+ y = rng.exponential(
318
+ scale=-(X @ coef + intercept) / np.log(1 - quantile), size=n_samples
319
+ )
320
+ model = HistGradientBoostingRegressor(
321
+ loss="quantile",
322
+ quantile=quantile,
323
+ max_iter=25,
324
+ random_state=0,
325
+ max_leaf_nodes=10,
326
+ ).fit(X, y)
327
+ assert_allclose(np.mean(model.predict(X) > y), quantile, rtol=1e-2)
328
+
329
+ pinball_loss = PinballLoss(quantile=quantile)
330
+ loss_true_quantile = pinball_loss(y, X @ coef + intercept)
331
+ loss_pred_quantile = pinball_loss(y, model.predict(X))
332
+ # we are overfitting
333
+ assert loss_pred_quantile <= loss_true_quantile
334
+
335
+
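The comment in test_quantile_asymmetric_error above derives that the q-quantile of an exponential distribution with rate lambda is -log(1 - q) / lambda, hence scale = 1 / lambda = -quantile(q) / log(1 - q). A quick standalone numerical check of that identity (not part of the test suite):

import numpy as np

rng = np.random.RandomState(0)
q, lam = 0.8, 2.0
samples = rng.exponential(scale=1.0 / lam, size=100_000)

theoretical = -np.log(1.0 - q) / lam  # quantile(q) = -log(1 - q) / lambda
print(np.quantile(samples, q))        # ~0.80 (empirical quantile)
print(theoretical)                    # ~0.8047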
336
+ @pytest.mark.parametrize("y", [([1.0, -2.0, 0.0]), ([0.0, 0.0, 0.0])])
337
+ def test_poisson_y_positive(y):
338
+ # Test that ValueError is raised if either one y_i < 0 or sum(y_i) <= 0.
339
+ err_msg = r"loss='poisson' requires non-negative y and sum\(y\) > 0."
340
+ gbdt = HistGradientBoostingRegressor(loss="poisson", random_state=0)
341
+ with pytest.raises(ValueError, match=err_msg):
342
+ gbdt.fit(np.zeros(shape=(len(y), 1)), y)
343
+
344
+
345
+ def test_poisson():
346
+ # For Poisson distributed target, Poisson loss should give better results
347
+ # than least squares measured in Poisson deviance as metric.
348
+ rng = np.random.RandomState(42)
349
+ n_train, n_test, n_features = 500, 100, 100
350
+ X = make_low_rank_matrix(
351
+ n_samples=n_train + n_test, n_features=n_features, random_state=rng
352
+ )
353
+ # We create a log-linear Poisson model and downscale coef as it will get
354
+ # exponentiated.
355
+ coef = rng.uniform(low=-2, high=2, size=n_features) / np.max(X, axis=0)
356
+ y = rng.poisson(lam=np.exp(X @ coef))
357
+ X_train, X_test, y_train, y_test = train_test_split(
358
+ X, y, test_size=n_test, random_state=rng
359
+ )
360
+ gbdt_pois = HistGradientBoostingRegressor(loss="poisson", random_state=rng)
361
+ gbdt_ls = HistGradientBoostingRegressor(loss="squared_error", random_state=rng)
362
+ gbdt_pois.fit(X_train, y_train)
363
+ gbdt_ls.fit(X_train, y_train)
364
+ dummy = DummyRegressor(strategy="mean").fit(X_train, y_train)
365
+
366
+ for X, y in [(X_train, y_train), (X_test, y_test)]:
367
+ metric_pois = mean_poisson_deviance(y, gbdt_pois.predict(X))
368
+ # squared_error might produce non-positive predictions => clip
369
+ metric_ls = mean_poisson_deviance(y, np.clip(gbdt_ls.predict(X), 1e-15, None))
370
+ metric_dummy = mean_poisson_deviance(y, dummy.predict(X))
371
+ assert metric_pois < metric_ls
372
+ assert metric_pois < metric_dummy
373
+
374
+
375
+ def test_binning_train_validation_are_separated():
376
+ # Make sure training and validation data are binned separately.
377
+ # See issue 13926
378
+
379
+ rng = np.random.RandomState(0)
380
+ validation_fraction = 0.2
381
+ gb = HistGradientBoostingClassifier(
382
+ early_stopping=True, validation_fraction=validation_fraction, random_state=rng
383
+ )
384
+ gb.fit(X_classification, y_classification)
385
+ mapper_training_data = gb._bin_mapper
386
+
387
+ # Note that since the data is small there is no subsampling and the
388
+ # random_state doesn't matter
389
+ mapper_whole_data = _BinMapper(random_state=0)
390
+ mapper_whole_data.fit(X_classification)
391
+
392
+ n_samples = X_classification.shape[0]
393
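+ # Because the data is small (fewer samples than max_bins) and the continuous
+ # features have all-distinct values, the bin mapper assigns one bin per sample
+ # value, so n_bins_non_missing_ equals the number of samples it was fitted on.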
+ assert np.all(
394
+ mapper_training_data.n_bins_non_missing_
395
+ == int((1 - validation_fraction) * n_samples)
396
+ )
397
+ assert np.all(
398
+ mapper_training_data.n_bins_non_missing_
399
+ != mapper_whole_data.n_bins_non_missing_
400
+ )
401
+
402
+
403
+ def test_missing_values_trivial():
404
+ # sanity check for missing values support. With only one feature and
405
+ # y == isnan(X), the gbdt is supposed to reach perfect accuracy on the
406
+ # training set.
407
+
408
+ n_samples = 100
409
+ n_features = 1
410
+ rng = np.random.RandomState(0)
411
+
412
+ X = rng.normal(size=(n_samples, n_features))
413
+ mask = rng.binomial(1, 0.5, size=X.shape).astype(bool)
414
+ X[mask] = np.nan
415
+ y = mask.ravel()
416
+ gb = HistGradientBoostingClassifier()
417
+ gb.fit(X, y)
418
+
419
+ assert gb.score(X, y) == pytest.approx(1)
420
+
421
+
422
+ @pytest.mark.parametrize("problem", ("classification", "regression"))
423
+ @pytest.mark.parametrize(
424
+ (
425
+ "missing_proportion, expected_min_score_classification, "
426
+ "expected_min_score_regression"
427
+ ),
428
+ [(0.1, 0.97, 0.89), (0.2, 0.93, 0.81), (0.5, 0.79, 0.52)],
429
+ )
430
+ def test_missing_values_resilience(
431
+ problem,
432
+ missing_proportion,
433
+ expected_min_score_classification,
434
+ expected_min_score_regression,
435
+ ):
436
+ # Make sure the estimators can deal with missing values and still yield
437
+ # decent predictions
438
+
439
+ rng = np.random.RandomState(0)
440
+ n_samples = 1000
441
+ n_features = 2
442
+ if problem == "regression":
443
+ X, y = make_regression(
444
+ n_samples=n_samples,
445
+ n_features=n_features,
446
+ n_informative=n_features,
447
+ random_state=rng,
448
+ )
449
+ gb = HistGradientBoostingRegressor()
450
+ expected_min_score = expected_min_score_regression
451
+ else:
452
+ X, y = make_classification(
453
+ n_samples=n_samples,
454
+ n_features=n_features,
455
+ n_informative=n_features,
456
+ n_redundant=0,
457
+ n_repeated=0,
458
+ random_state=rng,
459
+ )
460
+ gb = HistGradientBoostingClassifier()
461
+ expected_min_score = expected_min_score_classification
462
+
463
+ mask = rng.binomial(1, missing_proportion, size=X.shape).astype(bool)
464
+ X[mask] = np.nan
465
+
466
+ gb.fit(X, y)
467
+
468
+ assert gb.score(X, y) > expected_min_score
469
+
470
+
471
+ @pytest.mark.parametrize(
472
+ "data",
473
+ [
474
+ make_classification(random_state=0, n_classes=2),
475
+ make_classification(random_state=0, n_classes=3, n_informative=3),
476
+ ],
477
+ ids=["binary_log_loss", "multiclass_log_loss"],
478
+ )
479
+ def test_zero_division_hessians(data):
480
+ # non regression test for issue #14018
481
+ # make sure we avoid zero division errors when computing the leaves values.
482
+
483
+ # If the learning rate is too high, the raw predictions are bad and will
484
+ # saturate the softmax (or sigmoid in binary classif). This leads to
485
+ # probabilities being exactly 0 or 1, gradients being constant, and
486
+ # hessians being zero.
487
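+ # Leaf values are roughly -sum(gradients) / (sum(hessians) + l2_regularization),
+ # so an all-zero hessian sum could otherwise degenerate into a zero division;
+ # this test only checks that fitting does not raise in that regime.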
+ X, y = data
488
+ gb = HistGradientBoostingClassifier(learning_rate=100, max_iter=10)
489
+ gb.fit(X, y)
490
+
491
+
492
+ def test_small_trainset():
493
+ # Make sure that the small trainset is stratified and has the expected
494
+ # length (10k samples)
495
+ n_samples = 20000
496
+ original_distrib = {0: 0.1, 1: 0.2, 2: 0.3, 3: 0.4}
497
+ rng = np.random.RandomState(42)
498
+ X = rng.randn(n_samples).reshape(n_samples, 1)
499
+ y = [
500
+ [class_] * int(prop * n_samples) for (class_, prop) in original_distrib.items()
501
+ ]
502
+ y = shuffle(np.concatenate(y))
503
+ gb = HistGradientBoostingClassifier()
504
+
505
+ # Compute the small training set
506
+ X_small, y_small, *_ = gb._get_small_trainset(
507
+ X, y, seed=42, sample_weight_train=None
508
+ )
509
+
510
+ # Compute the class distribution in the small training set
511
+ unique, counts = np.unique(y_small, return_counts=True)
512
+ small_distrib = {class_: count / 10000 for (class_, count) in zip(unique, counts)}
513
+
514
+ # Test that the small training set has the expected length
515
+ assert X_small.shape[0] == 10000
516
+ assert y_small.shape[0] == 10000
517
+
518
+ # Test that the class distributions in the whole dataset and in the small
519
+ # training set are identical
520
+ assert small_distrib == pytest.approx(original_distrib)
521
+
522
+
523
+ def test_missing_values_minmax_imputation():
524
+ # Compare the built-in missing value handling of Histogram GBC with an
525
+ # a-priori missing value imputation strategy that should yield the same
526
+ # results in terms of decision function.
527
+ #
528
+ # Each feature (containing NaNs) is replaced by 2 features:
529
+ # - one where the nans are replaced by min(feature) - 1
530
+ # - one where the nans are replaced by max(feature) + 1
531
+ # A split where nans go to the left has an equivalent split in the
532
+ # first (min) feature, and a split where nans go to the right has an
533
+ # equivalent split in the second (max) feature.
534
+ #
535
+ # Assuming the data is such that there is never a tie to select the best
536
+ # feature to split on during training, the learned decision trees should be
537
+ # strictly equivalent (learn a sequence of splits that encode the same
538
+ # decision function).
539
+ #
540
+ # The MinMaxImputer transformer is meant to be a toy implementation of the
541
+ # "Missing In Attributes" (MIA) missing value handling for decision trees
542
+ # https://www.sciencedirect.com/science/article/abs/pii/S0167865508000305
543
+ # The implementation of MIA as an imputation transformer was suggested by
544
+ # "Remark 3" in :arxiv:`1902.06931`
545
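+ # Tiny illustration of the equivalence: for a feature with values
+ # [1.0, 2.0, nan], the min-imputed copy is [1.0, 2.0, 0.0] and the max-imputed
+ # copy is [1.0, 2.0, 3.0]. A split "x <= 1.5, nans to the left" on the original
+ # feature selects the same samples as "x <= 1.5" on the min-imputed copy, while
+ # "x <= 1.5, nans to the right" matches "x <= 1.5" on the max-imputed copy.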
+
546
+ class MinMaxImputer(TransformerMixin, BaseEstimator):
547
+ def fit(self, X, y=None):
548
+ mm = MinMaxScaler().fit(X)
549
+ self.data_min_ = mm.data_min_
550
+ self.data_max_ = mm.data_max_
551
+ return self
552
+
553
+ def transform(self, X):
554
+ X_min, X_max = X.copy(), X.copy()
555
+
556
+ for feature_idx in range(X.shape[1]):
557
+ nan_mask = np.isnan(X[:, feature_idx])
558
+ X_min[nan_mask, feature_idx] = self.data_min_[feature_idx] - 1
559
+ X_max[nan_mask, feature_idx] = self.data_max_[feature_idx] + 1
560
+
561
+ return np.concatenate([X_min, X_max], axis=1)
562
+
563
+ def make_missing_value_data(n_samples=int(1e4), seed=0):
564
+ rng = np.random.RandomState(seed)
565
+ X, y = make_regression(n_samples=n_samples, n_features=4, random_state=rng)
566
+
567
+ # Pre-bin the data to ensure a deterministic handling by the 2
568
+ # strategies and also make it easier to insert np.nan in a structured
569
+ # way:
570
+ X = KBinsDiscretizer(n_bins=42, encode="ordinal").fit_transform(X)
571
+
572
+ # First feature has missing values completely at random:
573
+ rnd_mask = rng.rand(X.shape[0]) > 0.9
574
+ X[rnd_mask, 0] = np.nan
575
+
576
+ # Second and third features have missing values for extreme values
577
+ # (censoring missingness):
578
+ low_mask = X[:, 1] == 0
579
+ X[low_mask, 1] = np.nan
580
+
581
+ high_mask = X[:, 2] == X[:, 2].max()
582
+ X[high_mask, 2] = np.nan
583
+
584
+ # Make the last feature nan pattern very informative:
585
+ y_max = np.percentile(y, 70)
586
+ y_max_mask = y >= y_max
587
+ y[y_max_mask] = y_max
588
+ X[y_max_mask, 3] = np.nan
589
+
590
+ # Check that there is at least one missing value in each feature:
591
+ for feature_idx in range(X.shape[1]):
592
+ assert any(np.isnan(X[:, feature_idx]))
593
+
594
+ # Let's use a test set to check that the learned decision function is
595
+ # the same as evaluated on unseen data. Otherwise it could just be the
596
+ # case that we find two independent ways to overfit the training set.
597
+ return train_test_split(X, y, random_state=rng)
598
+
599
+ # n_samples needs to be large enough to minimize the likelihood of having
600
+ # several candidate splits with the same gain value in a given tree.
601
+ X_train, X_test, y_train, y_test = make_missing_value_data(
602
+ n_samples=int(1e4), seed=0
603
+ )
604
+
605
+ # Use a small number of leaf nodes and iterations so that the models
606
+ # under-fit, which minimizes the likelihood of ties when training the
607
+ # model.
608
+ gbm1 = HistGradientBoostingRegressor(max_iter=100, max_leaf_nodes=5, random_state=0)
609
+ gbm1.fit(X_train, y_train)
610
+
611
+ gbm2 = make_pipeline(MinMaxImputer(), clone(gbm1))
612
+ gbm2.fit(X_train, y_train)
613
+
614
+ # Check that the models reach the same score:
615
+ assert gbm1.score(X_train, y_train) == pytest.approx(gbm2.score(X_train, y_train))
616
+
617
+ assert gbm1.score(X_test, y_test) == pytest.approx(gbm2.score(X_test, y_test))
618
+
619
+ # Check that the individual predictions match, as a finer grained
620
+ # decision function check.
621
+ assert_allclose(gbm1.predict(X_train), gbm2.predict(X_train))
622
+ assert_allclose(gbm1.predict(X_test), gbm2.predict(X_test))
623
+
624
+
625
+ def test_infinite_values():
626
+ # Basic test for infinite values
627
+
628
+ X = np.array([-np.inf, 0, 1, np.inf]).reshape(-1, 1)
629
+ y = np.array([0, 0, 1, 1])
630
+
631
+ gbdt = HistGradientBoostingRegressor(min_samples_leaf=1)
632
+ gbdt.fit(X, y)
633
+ np.testing.assert_allclose(gbdt.predict(X), y, atol=1e-4)
634
+
635
+
636
+ def test_consistent_lengths():
637
+ X = np.array([-np.inf, 0, 1, np.inf]).reshape(-1, 1)
638
+ y = np.array([0, 0, 1, 1])
639
+ sample_weight = np.array([0.1, 0.3, 0.1])
640
+ gbdt = HistGradientBoostingRegressor()
641
+ with pytest.raises(ValueError, match=r"sample_weight.shape == \(3,\), expected"):
642
+ gbdt.fit(X, y, sample_weight)
643
+
644
+ with pytest.raises(
645
+ ValueError, match="Found input variables with inconsistent number"
646
+ ):
647
+ gbdt.fit(X, y[1:])
648
+
649
+
650
+ def test_infinite_values_missing_values():
651
+ # High level test making sure that inf and nan values are properly handled
652
+ # when both are present. This is similar to
653
+ # test_split_on_nan_with_infinite_values() in test_grower.py, though we
654
+ # cannot check the predictions for binned values here.
655
+
656
+ X = np.asarray([-np.inf, 0, 1, np.inf, np.nan]).reshape(-1, 1)
657
+ y_isnan = np.isnan(X.ravel())
658
+ y_isinf = X.ravel() == np.inf
659
+
660
+ stump_clf = HistGradientBoostingClassifier(
661
+ min_samples_leaf=1, max_iter=1, learning_rate=1, max_depth=2
662
+ )
663
+
664
+ assert stump_clf.fit(X, y_isinf).score(X, y_isinf) == 1
665
+ assert stump_clf.fit(X, y_isnan).score(X, y_isnan) == 1
666
+
667
+
668
+ @pytest.mark.parametrize("scoring", [None, "loss"])
669
+ def test_string_target_early_stopping(scoring):
670
+ # Regression test for #14709 where the targets need to be encoded
671
+ # before computing the score
672
+ rng = np.random.RandomState(42)
673
+ X = rng.randn(100, 10)
674
+ y = np.array(["x"] * 50 + ["y"] * 50, dtype=object)
675
+ gbrt = HistGradientBoostingClassifier(n_iter_no_change=10, scoring=scoring)
676
+ gbrt.fit(X, y)
677
+
678
+
679
+ def test_zero_sample_weights_regression():
680
+ # Make sure setting a SW to zero amounts to ignoring the corresponding
681
+ # sample
682
+
683
+ X = [[1, 0], [1, 0], [1, 0], [0, 1]]
684
+ y = [0, 0, 1, 0]
685
+ # ignore the first 2 training samples by setting their weight to 0
686
+ sample_weight = [0, 0, 1, 1]
687
+ gb = HistGradientBoostingRegressor(min_samples_leaf=1)
688
+ gb.fit(X, y, sample_weight=sample_weight)
689
+ assert gb.predict([[1, 0]])[0] > 0.5
690
+
691
+
692
+ def test_zero_sample_weights_classification():
693
+ # Make sure setting a SW to zero amounts to ignoring the corresponding
694
+ # sample
695
+
696
+ X = [[1, 0], [1, 0], [1, 0], [0, 1]]
697
+ y = [0, 0, 1, 0]
698
+ # ignore the first 2 training samples by setting their weight to 0
699
+ sample_weight = [0, 0, 1, 1]
700
+ gb = HistGradientBoostingClassifier(loss="log_loss", min_samples_leaf=1)
701
+ gb.fit(X, y, sample_weight=sample_weight)
702
+ assert_array_equal(gb.predict([[1, 0]]), [1])
703
+
704
+ X = [[1, 0], [1, 0], [1, 0], [0, 1], [1, 1]]
705
+ y = [0, 0, 1, 0, 2]
706
+ # ignore the first 2 training samples by setting their weight to 0
707
+ sample_weight = [0, 0, 1, 1, 1]
708
+ gb = HistGradientBoostingClassifier(loss="log_loss", min_samples_leaf=1)
709
+ gb.fit(X, y, sample_weight=sample_weight)
710
+ assert_array_equal(gb.predict([[1, 0]]), [1])
711
+
712
+
713
+ @pytest.mark.parametrize(
714
+ "problem", ("regression", "binary_classification", "multiclass_classification")
715
+ )
716
+ @pytest.mark.parametrize("duplication", ("half", "all"))
717
+ def test_sample_weight_effect(problem, duplication):
718
+ # High level test to make sure that duplicating a sample is equivalent to
719
+ # giving it weight of 2.
720
+
721
+ # fails for n_samples > 255 because binning does not take sample weights
722
+ # into account. Keeping n_samples <= 255 makes
723
+ # sure only unique values are used so SW have no effect on binning.
724
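+ # With at most 255 distinct values per feature, the (unweighted) bin mapper
+ # assigns each value its own bin, so the weighted and the duplicated datasets
+ # are binned identically and any difference would come from the boosting itself.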
+ n_samples = 255
725
+ n_features = 2
726
+ if problem == "regression":
727
+ X, y = make_regression(
728
+ n_samples=n_samples,
729
+ n_features=n_features,
730
+ n_informative=n_features,
731
+ random_state=0,
732
+ )
733
+ Klass = HistGradientBoostingRegressor
734
+ else:
735
+ n_classes = 2 if problem == "binary_classification" else 3
736
+ X, y = make_classification(
737
+ n_samples=n_samples,
738
+ n_features=n_features,
739
+ n_informative=n_features,
740
+ n_redundant=0,
741
+ n_clusters_per_class=1,
742
+ n_classes=n_classes,
743
+ random_state=0,
744
+ )
745
+ Klass = HistGradientBoostingClassifier
746
+
747
+ # This test can't pass if min_samples_leaf > 1 because that would force 2
748
+ # samples to be in the same node in est_sw, while these samples would be
749
+ # free to be separate in est_dup: est_dup would just group together the
750
+ # duplicated samples.
751
+ est = Klass(min_samples_leaf=1)
752
+
753
+ # Create dataset with duplicate and corresponding sample weights
754
+ if duplication == "half":
755
+ lim = n_samples // 2
756
+ else:
757
+ lim = n_samples
758
+ X_dup = np.r_[X, X[:lim]]
759
+ y_dup = np.r_[y, y[:lim]]
760
+ sample_weight = np.ones(shape=(n_samples))
761
+ sample_weight[:lim] = 2
762
+
763
+ est_sw = clone(est).fit(X, y, sample_weight=sample_weight)
764
+ est_dup = clone(est).fit(X_dup, y_dup)
765
+
766
+ # checking raw_predict is stricter than just predict for classification
767
+ assert np.allclose(est_sw._raw_predict(X_dup), est_dup._raw_predict(X_dup))
768
+
769
+
770
+ @pytest.mark.parametrize("Loss", (HalfSquaredError, AbsoluteError))
771
+ def test_sum_hessians_are_sample_weight(Loss):
772
+ # For losses with constant hessians, the sum_hessians field of the
773
+ # histograms must be equal to the sum of the sample weight of samples at
774
+ # the corresponding bin.
775
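+ # Both HalfSquaredError and AbsoluteError are treated as constant-hessian
+ # losses: each sample's hessian is a constant (1) multiplied by its sample
+ # weight, so summing hessians over a bin gives that bin's total sample weight.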
+
776
+ rng = np.random.RandomState(0)
777
+ n_samples = 1000
778
+ n_features = 2
779
+ X, y = make_regression(n_samples=n_samples, n_features=n_features, random_state=rng)
780
+ bin_mapper = _BinMapper()
781
+ X_binned = bin_mapper.fit_transform(X)
782
+
783
+ # While sample weights are supposed to be positive, this still works.
784
+ sample_weight = rng.normal(size=n_samples)
785
+
786
+ loss = Loss(sample_weight=sample_weight)
787
+ gradients, hessians = loss.init_gradient_and_hessian(
788
+ n_samples=n_samples, dtype=G_H_DTYPE
789
+ )
790
+ gradients, hessians = gradients.reshape((-1, 1)), hessians.reshape((-1, 1))
791
+ raw_predictions = rng.normal(size=(n_samples, 1))
792
+ loss.gradient_hessian(
793
+ y_true=y,
794
+ raw_prediction=raw_predictions,
795
+ sample_weight=sample_weight,
796
+ gradient_out=gradients,
797
+ hessian_out=hessians,
798
+ n_threads=n_threads,
799
+ )
800
+
801
+ # build sum_sample_weight which contains the sum of the sample weights at
802
+ # each bin (for each feature). This must be equal to the sum_hessians
803
+ # field of the corresponding histogram
804
+ sum_sw = np.zeros(shape=(n_features, bin_mapper.n_bins))
805
+ for feature_idx in range(n_features):
806
+ for sample_idx in range(n_samples):
807
+ sum_sw[feature_idx, X_binned[sample_idx, feature_idx]] += sample_weight[
808
+ sample_idx
809
+ ]
810
+
811
+ # Build histogram
812
+ grower = TreeGrower(
813
+ X_binned, gradients[:, 0], hessians[:, 0], n_bins=bin_mapper.n_bins
814
+ )
815
+ histograms = grower.histogram_builder.compute_histograms_brute(
816
+ grower.root.sample_indices
817
+ )
818
+
819
+ for feature_idx in range(n_features):
820
+ for bin_idx in range(bin_mapper.n_bins):
821
+ assert histograms[feature_idx, bin_idx]["sum_hessians"] == (
822
+ pytest.approx(sum_sw[feature_idx, bin_idx], rel=1e-5)
823
+ )
824
+
825
+
826
+ def test_max_depth_max_leaf_nodes():
827
+ # Non regression test for
828
+ # https://github.com/scikit-learn/scikit-learn/issues/16179
829
+ # there was a bug when the max_depth and the max_leaf_nodes criteria were
830
+ # met at the same time, which would lead to max_leaf_nodes not being
831
+ # respected.
832
+ X, y = make_classification(random_state=0)
833
+ est = HistGradientBoostingClassifier(max_depth=2, max_leaf_nodes=3, max_iter=1).fit(
834
+ X, y
835
+ )
836
+ tree = est._predictors[0][0]
837
+ assert tree.get_max_depth() == 2
838
+ assert tree.get_n_leaf_nodes() == 3 # would be 4 prior to bug fix
839
+
840
+
841
+ def test_early_stopping_on_test_set_with_warm_start():
842
+ # Non regression test for #16661 where second fit fails with
843
+ # warm_start=True, early_stopping is on, and no validation set
844
+ X, y = make_classification(random_state=0)
845
+ gb = HistGradientBoostingClassifier(
846
+ max_iter=1,
847
+ scoring="loss",
848
+ warm_start=True,
849
+ early_stopping=True,
850
+ n_iter_no_change=1,
851
+ validation_fraction=None,
852
+ )
853
+
854
+ gb.fit(X, y)
855
+ # does not raise on second call
856
+ gb.set_params(max_iter=2)
857
+ gb.fit(X, y)
858
+
859
+
860
+ def test_early_stopping_with_sample_weights(monkeypatch):
861
+ """Check that sample_weight is passed to the scorer and that _raw_predict is
862
+ not called."""
863
+
864
+ mock_scorer = Mock(side_effect=get_scorer("neg_median_absolute_error"))
865
+
866
+ def mock_check_scoring(estimator, scoring):
867
+ assert scoring == "neg_median_absolute_error"
868
+ return mock_scorer
869
+
870
+ monkeypatch.setattr(
871
+ sklearn.ensemble._hist_gradient_boosting.gradient_boosting,
872
+ "check_scoring",
873
+ mock_check_scoring,
874
+ )
875
+
876
+ X, y = make_regression(random_state=0)
877
+ sample_weight = np.ones_like(y)
878
+ hist = HistGradientBoostingRegressor(
879
+ max_iter=2,
880
+ early_stopping=True,
881
+ random_state=0,
882
+ scoring="neg_median_absolute_error",
883
+ )
884
+ mock_raw_predict = Mock(side_effect=hist._raw_predict)
885
+ hist._raw_predict = mock_raw_predict
886
+ hist.fit(X, y, sample_weight=sample_weight)
887
+
888
+ # _raw_predict should never be called with scoring as a string
889
+ assert mock_raw_predict.call_count == 0
890
+
891
+ # The scorer is called twice (train and val) for the baseline score, and twice
892
+ # per iteration (train and val) after that. So 6 times in total for `max_iter=2`.
893
+ assert mock_scorer.call_count == 6
894
+ for arg_list in mock_scorer.call_args_list:
895
+ assert "sample_weight" in arg_list[1]
896
+
897
+
898
+ def test_raw_predict_is_called_with_custom_scorer():
899
+ """Custom scorer will still call _raw_predict."""
900
+
901
+ mock_scorer = Mock(side_effect=get_scorer("neg_median_absolute_error"))
902
+
903
+ X, y = make_regression(random_state=0)
904
+ hist = HistGradientBoostingRegressor(
905
+ max_iter=2,
906
+ early_stopping=True,
907
+ random_state=0,
908
+ scoring=mock_scorer,
909
+ )
910
+ mock_raw_predict = Mock(side_effect=hist._raw_predict)
911
+ hist._raw_predict = mock_raw_predict
912
+ hist.fit(X, y)
913
+
914
+ # `_raw_predict` and scorer is called twice (train and val) for the baseline score,
915
+ # and twice per iteration (train and val) after that. So 6 times in total for
916
+ # `max_iter=2`.
917
+ assert mock_raw_predict.call_count == 6
918
+ assert mock_scorer.call_count == 6
919
+
920
+
921
+ @pytest.mark.parametrize(
922
+ "Est", (HistGradientBoostingClassifier, HistGradientBoostingRegressor)
923
+ )
924
+ def test_single_node_trees(Est):
925
+ # Make sure it's still possible to build single-node trees. In that case
926
+ # the value of the root is set to 0. That's a correct value: if the tree is
927
+ # single-node that's because min_gain_to_split is not respected right from
928
+ # the root, so we don't want the tree to have any impact on the
929
+ # predictions.
930
+
931
+ X, y = make_classification(random_state=0)
932
+ y[:] = 1 # constant target will lead to a single root node
933
+
934
+ est = Est(max_iter=20)
935
+ est.fit(X, y)
936
+
937
+ assert all(len(predictor[0].nodes) == 1 for predictor in est._predictors)
938
+ assert all(predictor[0].nodes[0]["value"] == 0 for predictor in est._predictors)
939
+ # Still gives correct predictions thanks to the baseline prediction
940
+ assert_allclose(est.predict(X), y)
941
+
942
+
943
+ @pytest.mark.parametrize(
944
+ "Est, loss, X, y",
945
+ [
946
+ (
947
+ HistGradientBoostingClassifier,
948
+ HalfBinomialLoss(sample_weight=None),
949
+ X_classification,
950
+ y_classification,
951
+ ),
952
+ (
953
+ HistGradientBoostingRegressor,
954
+ HalfSquaredError(sample_weight=None),
955
+ X_regression,
956
+ y_regression,
957
+ ),
958
+ ],
959
+ )
960
+ def test_custom_loss(Est, loss, X, y):
961
+ est = Est(loss=loss, max_iter=20)
962
+ est.fit(X, y)
963
+
964
+
965
+ @pytest.mark.parametrize(
966
+ "HistGradientBoosting, X, y",
967
+ [
968
+ (HistGradientBoostingClassifier, X_classification, y_classification),
969
+ (HistGradientBoostingRegressor, X_regression, y_regression),
970
+ (
971
+ HistGradientBoostingClassifier,
972
+ X_multi_classification,
973
+ y_multi_classification,
974
+ ),
975
+ ],
976
+ )
977
+ def test_staged_predict(HistGradientBoosting, X, y):
978
+ # Test whether staged predictor eventually gives
979
+ # the same prediction.
980
+ X_train, X_test, y_train, y_test = train_test_split(
981
+ X, y, test_size=0.5, random_state=0
982
+ )
983
+ gb = HistGradientBoosting(max_iter=10)
984
+
985
+ # test that NotFittedError is raised if not fitted
986
+ with pytest.raises(NotFittedError):
987
+ next(gb.staged_predict(X_test))
988
+
989
+ gb.fit(X_train, y_train)
990
+
991
+ # test that the staged predictions of each iteration
992
+ # are equal to the corresponding predictions of the same estimator
993
+ # trained from scratch.
994
+ # this also tests the limit case when max_iter = 1
995
+ method_names = (
996
+ ["predict"]
997
+ if is_regressor(gb)
998
+ else ["predict", "predict_proba", "decision_function"]
999
+ )
1000
+ for method_name in method_names:
1001
+ staged_method = getattr(gb, "staged_" + method_name)
1002
+ staged_predictions = list(staged_method(X_test))
1003
+ assert len(staged_predictions) == gb.n_iter_
1004
+ for n_iter, staged_predictions in enumerate(staged_method(X_test), 1):
1005
+ aux = HistGradientBoosting(max_iter=n_iter)
1006
+ aux.fit(X_train, y_train)
1007
+ pred_aux = getattr(aux, method_name)(X_test)
1008
+
1009
+ assert_allclose(staged_predictions, pred_aux)
1010
+ assert staged_predictions.shape == pred_aux.shape
1011
+
1012
+
1013
+ @pytest.mark.parametrize("insert_missing", [False, True])
1014
+ @pytest.mark.parametrize(
1015
+ "Est", (HistGradientBoostingRegressor, HistGradientBoostingClassifier)
1016
+ )
1017
+ @pytest.mark.parametrize("bool_categorical_parameter", [True, False])
1018
+ @pytest.mark.parametrize("missing_value", [np.nan, -1])
1019
+ def test_unknown_categories_nan(
1020
+ insert_missing, Est, bool_categorical_parameter, missing_value
1021
+ ):
1022
+ # Make sure no error is raised at predict if a category wasn't seen during
1023
+ # fit. We also make sure they're treated as nans.
1024
+
1025
+ rng = np.random.RandomState(0)
1026
+ n_samples = 1000
1027
+ f1 = rng.rand(n_samples)
1028
+ f2 = rng.randint(4, size=n_samples)
1029
+ X = np.c_[f1, f2]
1030
+ y = np.zeros(shape=n_samples)
1031
+ y[X[:, 1] % 2 == 0] = 1
1032
+
1033
+ if bool_categorical_parameter:
1034
+ categorical_features = [False, True]
1035
+ else:
1036
+ categorical_features = [1]
1037
+
1038
+ if insert_missing:
1039
+ mask = rng.binomial(1, 0.01, size=X.shape).astype(bool)
1040
+ assert mask.sum() > 0
1041
+ X[mask] = missing_value
1042
+
1043
+ est = Est(max_iter=20, categorical_features=categorical_features).fit(X, y)
1044
+ assert_array_equal(est.is_categorical_, [False, True])
1045
+
1046
+ # Make sure no error is raised on unknown categories and nans
1047
+ # unknown categories will be treated as nans
1048
+ X_test = np.zeros((10, X.shape[1]), dtype=float)
1049
+ X_test[:5, 1] = 30
1050
+ X_test[5:, 1] = missing_value
1051
+ assert len(np.unique(est.predict(X_test))) == 1
1052
+
1053
+
1054
+ def test_categorical_encoding_strategies():
1055
+ # Check native categorical handling vs different encoding strategies. We
1056
+ # make sure that native encoding needs only 1 split to achieve a perfect
1057
+ # prediction on a simple dataset. In contrast, OneHotEncoded data needs
1058
+ # more depth / splits, and treating categories as ordered (just using
1059
+ # OrdinalEncoder) requires even more depth.
1060
+
1061
+ # dataset with one random continuous feature, and one categorical feature
1062
+ # with values in [0, 5], e.g. from an OrdinalEncoder.
1063
+ # class == 1 iff categorical value in {0, 2, 4}
1064
+ rng = np.random.RandomState(0)
1065
+ n_samples = 10_000
1066
+ f1 = rng.rand(n_samples)
1067
+ f2 = rng.randint(6, size=n_samples)
1068
+ X = np.c_[f1, f2]
1069
+ y = np.zeros(shape=n_samples)
1070
+ y[X[:, 1] % 2 == 0] = 1
1071
+
1072
+ # make sure dataset is balanced so that the baseline_prediction doesn't
1073
+ # influence predictions too much with max_iter = 1
1074
+ assert 0.49 < y.mean() < 0.51
1075
+
1076
+ native_cat_specs = [
1077
+ [False, True],
1078
+ [1],
1079
+ ]
1080
+ try:
1081
+ import pandas as pd
1082
+
1083
+ X = pd.DataFrame(X, columns=["f_0", "f_1"])
1084
+ native_cat_specs.append(["f_1"])
1085
+ except ImportError:
1086
+ pass
1087
+
1088
+ for native_cat_spec in native_cat_specs:
1089
+ clf_cat = HistGradientBoostingClassifier(
1090
+ max_iter=1, max_depth=1, categorical_features=native_cat_spec
1091
+ )
1092
+ clf_cat.fit(X, y)
1093
+
1094
+ # Using native categorical encoding, we get perfect predictions with just
1095
+ # one split
1096
+ assert cross_val_score(clf_cat, X, y).mean() == 1
1097
+
1098
+ # quick sanity check for the bitset: 0, 2, 4 = 2**0 + 2**2 + 2**4 = 21
1099
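+ # The left bitset is stored as 8 unsigned 32-bit words covering up to 256
+ # category values; categories 0, 2 and 4 all set bits in the first word, so
+ # the remaining words stay 0.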
+ expected_left_bitset = [21, 0, 0, 0, 0, 0, 0, 0]
1100
+ left_bitset = clf_cat.fit(X, y)._predictors[0][0].raw_left_cat_bitsets[0]
1101
+ assert_array_equal(left_bitset, expected_left_bitset)
1102
+
1103
+ # Treating categories as ordered, we need more depth / more splits to get
1104
+ # the same predictions
1105
+ clf_no_cat = HistGradientBoostingClassifier(
1106
+ max_iter=1, max_depth=4, categorical_features=None
1107
+ )
1108
+ assert cross_val_score(clf_no_cat, X, y).mean() < 0.9
1109
+
1110
+ clf_no_cat.set_params(max_depth=5)
1111
+ assert cross_val_score(clf_no_cat, X, y).mean() == 1
1112
+
1113
+ # Using one-hot encoded data, we need fewer splits than with purely
1114
+ # ordinal-encoded data, but still more splits than with the native categorical splits
1115
+ ct = make_column_transformer(
1116
+ (OneHotEncoder(sparse_output=False), [1]), remainder="passthrough"
1117
+ )
1118
+ X_ohe = ct.fit_transform(X)
1119
+ clf_no_cat.set_params(max_depth=2)
1120
+ assert cross_val_score(clf_no_cat, X_ohe, y).mean() < 0.9
1121
+
1122
+ clf_no_cat.set_params(max_depth=3)
1123
+ assert cross_val_score(clf_no_cat, X_ohe, y).mean() == 1
1124
+
1125
+
1126
+ @pytest.mark.parametrize(
1127
+ "Est", (HistGradientBoostingClassifier, HistGradientBoostingRegressor)
1128
+ )
1129
+ @pytest.mark.parametrize(
1130
+ "categorical_features, monotonic_cst, expected_msg",
1131
+ [
1132
+ (
1133
+ [b"hello", b"world"],
1134
+ None,
1135
+ re.escape(
1136
+ "categorical_features must be an array-like of bool, int or str, "
1137
+ "got: bytes40."
1138
+ ),
1139
+ ),
1140
+ (
1141
+ np.array([b"hello", 1.3], dtype=object),
1142
+ None,
1143
+ re.escape(
1144
+ "categorical_features must be an array-like of bool, int or str, "
1145
+ "got: bytes, float."
1146
+ ),
1147
+ ),
1148
+ (
1149
+ [0, -1],
1150
+ None,
1151
+ re.escape(
1152
+ "categorical_features set as integer indices must be in "
1153
+ "[0, n_features - 1]"
1154
+ ),
1155
+ ),
1156
+ (
1157
+ [True, True, False, False, True],
1158
+ None,
1159
+ re.escape(
1160
+ "categorical_features set as a boolean mask must have shape "
1161
+ "(n_features,)"
1162
+ ),
1163
+ ),
1164
+ (
1165
+ [True, True, False, False],
1166
+ [0, -1, 0, 1],
1167
+ "Categorical features cannot have monotonic constraints",
1168
+ ),
1169
+ ],
1170
+ )
1171
+ def test_categorical_spec_errors(
1172
+ Est, categorical_features, monotonic_cst, expected_msg
1173
+ ):
1174
+ # Test errors when categories are specified incorrectly
1175
+ n_samples = 100
1176
+ X, y = make_classification(random_state=0, n_features=4, n_samples=n_samples)
1177
+ rng = np.random.RandomState(0)
1178
+ X[:, 0] = rng.randint(0, 10, size=n_samples)
1179
+ X[:, 1] = rng.randint(0, 10, size=n_samples)
1180
+ est = Est(categorical_features=categorical_features, monotonic_cst=monotonic_cst)
1181
+
1182
+ with pytest.raises(ValueError, match=expected_msg):
1183
+ est.fit(X, y)
1184
+
1185
+
1186
+ @pytest.mark.parametrize(
1187
+ "Est", (HistGradientBoostingClassifier, HistGradientBoostingRegressor)
1188
+ )
1189
+ def test_categorical_spec_errors_with_feature_names(Est):
1190
+ pd = pytest.importorskip("pandas")
1191
+ n_samples = 10
1192
+ X = pd.DataFrame(
1193
+ {
1194
+ "f0": range(n_samples),
1195
+ "f1": range(n_samples),
1196
+ "f2": [1.0] * n_samples,
1197
+ }
1198
+ )
1199
+ y = [0, 1] * (n_samples // 2)
1200
+
1201
+ est = Est(categorical_features=["f0", "f1", "f3"])
1202
+ expected_msg = re.escape(
1203
+ "categorical_features has a item value 'f3' which is not a valid "
1204
+ "feature name of the training data."
1205
+ )
1206
+ with pytest.raises(ValueError, match=expected_msg):
1207
+ est.fit(X, y)
1208
+
1209
+ est = Est(categorical_features=["f0", "f1"])
1210
+ expected_msg = re.escape(
1211
+ "categorical_features should be passed as an array of integers or "
1212
+ "as a boolean mask when the model is fitted on data without feature "
1213
+ "names."
1214
+ )
1215
+ with pytest.raises(ValueError, match=expected_msg):
1216
+ est.fit(X.to_numpy(), y)
1217
+
1218
+
1219
+ @pytest.mark.parametrize(
1220
+ "Est", (HistGradientBoostingClassifier, HistGradientBoostingRegressor)
1221
+ )
1222
+ @pytest.mark.parametrize("categorical_features", ([False, False], []))
1223
+ @pytest.mark.parametrize("as_array", (True, False))
1224
+ def test_categorical_spec_no_categories(Est, categorical_features, as_array):
1225
+ # Make sure we can properly detect that no categorical features are present
1226
+ # even if the categorical_features parameter is not None
1227
+ X = np.arange(10).reshape(5, 2)
1228
+ y = np.arange(5)
1229
+ if as_array:
1230
+ categorical_features = np.asarray(categorical_features)
1231
+ est = Est(categorical_features=categorical_features).fit(X, y)
1232
+ assert est.is_categorical_ is None
1233
+
1234
+
1235
+ @pytest.mark.parametrize(
1236
+ "Est", (HistGradientBoostingClassifier, HistGradientBoostingRegressor)
1237
+ )
1238
+ @pytest.mark.parametrize(
1239
+ "use_pandas, feature_name", [(False, "at index 0"), (True, "'f0'")]
1240
+ )
1241
+ def test_categorical_bad_encoding_errors(Est, use_pandas, feature_name):
1242
+ # Test errors when categories are encoded incorrectly
1243
+
1244
+ gb = Est(categorical_features=[True], max_bins=2)
1245
+
1246
+ if use_pandas:
1247
+ pd = pytest.importorskip("pandas")
1248
+ X = pd.DataFrame({"f0": [0, 1, 2]})
1249
+ else:
1250
+ X = np.array([[0, 1, 2]]).T
1251
+ y = np.arange(3)
1252
+ msg = (
1253
+ f"Categorical feature {feature_name} is expected to have a "
1254
+ "cardinality <= 2 but actually has a cardinality of 3."
1255
+ )
1256
+ with pytest.raises(ValueError, match=msg):
1257
+ gb.fit(X, y)
1258
+
1259
+ # nans are ignored in the counts
1260
+ X = np.array([[0, 1, np.nan]]).T
1261
+ y = np.arange(3)
1262
+ gb.fit(X, y)
1263
+
1264
+
1265
+ @pytest.mark.parametrize(
1266
+ "Est", (HistGradientBoostingClassifier, HistGradientBoostingRegressor)
1267
+ )
1268
+ def test_uint8_predict(Est):
1269
+ # Non regression test for
1270
+ # https://github.com/scikit-learn/scikit-learn/issues/18408
1271
+ # Make sure X can be of dtype uint8 (i.e. X_BINNED_DTYPE) in predict. It
1272
+ # will be converted to X_DTYPE.
1273
+
1274
+ rng = np.random.RandomState(0)
1275
+
1276
+ X = rng.randint(0, 100, size=(10, 2)).astype(np.uint8)
1277
+ y = rng.randint(0, 2, size=10).astype(np.uint8)
1278
+ est = Est()
1279
+ est.fit(X, y)
1280
+ est.predict(X)
1281
+
1282
+
1283
+ @pytest.mark.parametrize(
1284
+ "interaction_cst, n_features, result",
1285
+ [
1286
+ (None, 931, None),
1287
+ ([{0, 1}], 2, [{0, 1}]),
1288
+ ("pairwise", 2, [{0, 1}]),
1289
+ ("pairwise", 4, [{0, 1}, {0, 2}, {0, 3}, {1, 2}, {1, 3}, {2, 3}]),
1290
+ ("no_interactions", 2, [{0}, {1}]),
1291
+ ("no_interactions", 4, [{0}, {1}, {2}, {3}]),
1292
+ ([(1, 0), [5, 1]], 6, [{0, 1}, {1, 5}, {2, 3, 4}]),
1293
+ ],
1294
+ )
1295
+ def test_check_interaction_cst(interaction_cst, n_features, result):
1296
+ """Check that _check_interaction_cst returns the expected list of sets"""
1297
+ est = HistGradientBoostingRegressor()
1298
+ est.set_params(interaction_cst=interaction_cst)
1299
+ assert est._check_interaction_cst(n_features) == result
1300
+
1301
+
1302
+ def test_interaction_cst_numerically():
1303
+ """Check that a model fitted with interaction constraints has no forbidden interactions."""
1304
+ rng = np.random.RandomState(42)
1305
+ n_samples = 1000
1306
+ X = rng.uniform(size=(n_samples, 2))
1307
+ # Construct y with a strong interaction term
1308
+ # y = x0 + x1 + 5 * x0 * x1
1309
+ y = np.hstack((X, 5 * X[:, [0]] * X[:, [1]])).sum(axis=1)
1310
+
1311
+ est = HistGradientBoostingRegressor(random_state=42)
1312
+ est.fit(X, y)
1313
+ est_no_interactions = HistGradientBoostingRegressor(
1314
+ interaction_cst=[{0}, {1}], random_state=42
1315
+ )
1316
+ est_no_interactions.fit(X, y)
1317
+
1318
+ delta = 0.25
1319
+ # Make sure we do not extrapolate out of the training set as tree-based estimators
1320
+ # are very bad at doing so.
1321
+ X_test = X[(X[:, 0] < 1 - delta) & (X[:, 1] < 1 - delta)]
1322
+ X_delta_d_0 = X_test + [delta, 0]
1323
+ X_delta_0_d = X_test + [0, delta]
1324
+ X_delta_d_d = X_test + [delta, delta]
1325
+
1326
+ # Note: For the y from above as a function of x0 and x1, we have
1327
+ # y(x0+d, x1+d) = y(x0, x1) + 5 * d * (2/5 + x0 + x1) + 5 * d**2
1328
+ # y(x0+d, x1) = y(x0, x1) + 5 * d * (1/5 + x1)
1329
+ # y(x0, x1+d) = y(x0, x1) + 5 * d * (1/5 + x0)
1330
+ # Without interaction constraints, we would expect a result of 5 * d**2 for the
1331
+ # following expression, but zero with constraints in place.
1332
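+ # Combining the expansions above, the mixed second difference
+ # y(x0+d, x1+d) + y(x0, x1) - y(x0+d, x1) - y(x0, x1+d) = 5 * d**2 isolates
+ # the interaction term; an additive model f(x0) + g(x1), which is what
+ # interaction_cst=[{0}, {1}] enforces, makes it identically zero.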
+ assert_allclose(
1333
+ est_no_interactions.predict(X_delta_d_d)
1334
+ + est_no_interactions.predict(X_test)
1335
+ - est_no_interactions.predict(X_delta_d_0)
1336
+ - est_no_interactions.predict(X_delta_0_d),
1337
+ 0,
1338
+ atol=1e-12,
1339
+ )
1340
+
1341
+ # The correct value of the expression is 5 * delta**2, but this is hard to
1342
+ # achieve with a fitted tree-based model. However, with 100 iterations the
1343
+ # expression should at least be positive!
1344
+ assert np.all(
1345
+ est.predict(X_delta_d_d)
1346
+ + est.predict(X_test)
1347
+ - est.predict(X_delta_d_0)
1348
+ - est.predict(X_delta_0_d)
1349
+ > 0.01
1350
+ )
1351
+
1352
+
1353
+ def test_no_user_warning_with_scoring():
1354
+ """Check that no UserWarning is raised when scoring is set.
1355
+
1356
+ Non-regression test for #22907.
1357
+ """
1358
+ pd = pytest.importorskip("pandas")
1359
+ X, y = make_regression(n_samples=50, random_state=0)
1360
+ X_df = pd.DataFrame(X, columns=[f"col{i}" for i in range(X.shape[1])])
1361
+
1362
+ est = HistGradientBoostingRegressor(
1363
+ random_state=0, scoring="neg_mean_absolute_error", early_stopping=True
1364
+ )
1365
+ with warnings.catch_warnings():
1366
+ warnings.simplefilter("error", UserWarning)
1367
+ est.fit(X_df, y)
1368
+
1369
+
1370
+ def test_class_weights():
1371
+ """High level test to check class_weights."""
1372
+ n_samples = 255
1373
+ n_features = 2
1374
+
1375
+ X, y = make_classification(
1376
+ n_samples=n_samples,
1377
+ n_features=n_features,
1378
+ n_informative=n_features,
1379
+ n_redundant=0,
1380
+ n_clusters_per_class=1,
1381
+ n_classes=2,
1382
+ random_state=0,
1383
+ )
1384
+ y_is_1 = y == 1
1385
+
1386
+ # class_weight is the same as sample weights with the corresponding class
1387
+ clf = HistGradientBoostingClassifier(
1388
+ min_samples_leaf=2, random_state=0, max_depth=2
1389
+ )
1390
+ sample_weight = np.ones(shape=(n_samples))
1391
+ sample_weight[y_is_1] = 3.0
1392
+ clf.fit(X, y, sample_weight=sample_weight)
1393
+
1394
+ class_weight = {0: 1.0, 1: 3.0}
1395
+ clf_class_weighted = clone(clf).set_params(class_weight=class_weight)
1396
+ clf_class_weighted.fit(X, y)
1397
+
1398
+ assert_allclose(clf.decision_function(X), clf_class_weighted.decision_function(X))
1399
+
1400
+ # Check that sample_weight and class_weight are multiplicative
1401
+ clf.fit(X, y, sample_weight=sample_weight**2)
1402
+ clf_class_weighted.fit(X, y, sample_weight=sample_weight)
1403
+ assert_allclose(clf.decision_function(X), clf_class_weighted.decision_function(X))
1404
+
1405
+ # Make imbalanced dataset
1406
+ X_imb = np.concatenate((X[~y_is_1], X[y_is_1][:10]))
1407
+ y_imb = np.concatenate((y[~y_is_1], y[y_is_1][:10]))
1408
+
1409
+ # class_weight="balanced" is equivalent to sample weights equal to
1410
+ # n_samples / (n_classes * np.bincount(y))
1411
+ clf_balanced = clone(clf).set_params(class_weight="balanced")
1412
+ clf_balanced.fit(X_imb, y_imb)
1413
+
1414
+ class_weight = y_imb.shape[0] / (2 * np.bincount(y_imb))
1415
+ sample_weight = class_weight[y_imb]
1416
+ clf_sample_weight = clone(clf).set_params(class_weight=None)
1417
+ clf_sample_weight.fit(X_imb, y_imb, sample_weight=sample_weight)
1418
+
1419
+ assert_allclose(
1420
+ clf_balanced.decision_function(X_imb),
1421
+ clf_sample_weight.decision_function(X_imb),
1422
+ )
1423
+
1424
+
1425
+ def test_unknown_category_that_are_negative():
1426
+ """Check that unknown categories that are negative do not error.
1427
+
1428
+ Non-regression test for #24274.
1429
+ """
1430
+ rng = np.random.RandomState(42)
1431
+ n_samples = 1000
1432
+ X = np.c_[rng.rand(n_samples), rng.randint(4, size=n_samples)]
1433
+ y = np.zeros(shape=n_samples)
1434
+ y[X[:, 1] % 2 == 0] = 1
1435
+
1436
+ hist = HistGradientBoostingRegressor(
1437
+ random_state=0,
1438
+ categorical_features=[False, True],
1439
+ max_iter=10,
1440
+ ).fit(X, y)
1441
+
1442
+ # Check that negative values from the second column are treated like a
1443
+ # missing category
1444
+ X_test_neg = np.asarray([[1, -2], [3, -4]])
1445
+ X_test_nan = np.asarray([[1, np.nan], [3, np.nan]])
1446
+
1447
+ assert_allclose(hist.predict(X_test_neg), hist.predict(X_test_nan))
1448
+
1449
+
1450
+ @pytest.mark.parametrize("dataframe_lib", ["pandas", "polars"])
1451
+ @pytest.mark.parametrize(
1452
+ "HistGradientBoosting",
1453
+ [HistGradientBoostingClassifier, HistGradientBoostingRegressor],
1454
+ )
1455
+ def test_dataframe_categorical_results_same_as_ndarray(
1456
+ dataframe_lib, HistGradientBoosting
1457
+ ):
1458
+ """Check that dataframe categorical features give the same results as ndarray."""
1459
+ pytest.importorskip(dataframe_lib)
1460
+
1461
+ rng = np.random.RandomState(42)
1462
+ n_samples = 5_000
1463
+ n_cardinality = 50
1464
+ max_bins = 100
1465
+ f_num = rng.rand(n_samples)
1466
+ f_cat = rng.randint(n_cardinality, size=n_samples)
1467
+
1468
+ # Make f_cat an informative feature
1469
+ y = (f_cat % 3 == 0) & (f_num > 0.2)
1470
+
1471
+ X = np.c_[f_num, f_cat]
1472
+ f_cat = [f"cat{c:0>3}" for c in f_cat]
1473
+ X_df = _convert_container(
1474
+ np.asarray([f_num, f_cat]).T,
1475
+ dataframe_lib,
1476
+ ["f_num", "f_cat"],
1477
+ categorical_feature_names=["f_cat"],
1478
+ )
1479
+
1480
+ X_train, X_test, X_train_df, X_test_df, y_train, y_test = train_test_split(
1481
+ X, X_df, y, random_state=0
1482
+ )
1483
+
1484
+ hist_kwargs = dict(max_iter=10, max_bins=max_bins, random_state=0)
1485
+ hist_np = HistGradientBoosting(categorical_features=[False, True], **hist_kwargs)
1486
+ hist_np.fit(X_train, y_train)
1487
+
1488
+ hist_pd = HistGradientBoosting(categorical_features="from_dtype", **hist_kwargs)
1489
+ hist_pd.fit(X_train_df, y_train)
1490
+
1491
+ # Check categories are correct and sorted
1492
+ categories = hist_pd._preprocessor.named_transformers_["encoder"].categories_[0]
1493
+ assert_array_equal(categories, np.unique(f_cat))
1494
+
1495
+ assert len(hist_np._predictors) == len(hist_pd._predictors)
1496
+ for predictor_1, predictor_2 in zip(hist_np._predictors, hist_pd._predictors):
1497
+ assert len(predictor_1[0].nodes) == len(predictor_2[0].nodes)
1498
+
1499
+ score_np = hist_np.score(X_test, y_test)
1500
+ score_pd = hist_pd.score(X_test_df, y_test)
1501
+ assert score_np == pytest.approx(score_pd)
1502
+ assert_allclose(hist_np.predict(X_test), hist_pd.predict(X_test_df))
1503
+
1504
+
1505
+ @pytest.mark.parametrize("dataframe_lib", ["pandas", "polars"])
1506
+ @pytest.mark.parametrize(
1507
+ "HistGradientBoosting",
1508
+ [HistGradientBoostingClassifier, HistGradientBoostingRegressor],
1509
+ )
1510
+ def test_dataframe_categorical_errors(dataframe_lib, HistGradientBoosting):
1511
+ """Check error cases for dataframe categorical features."""
1512
+ pytest.importorskip(dataframe_lib)
1513
+ msg = "Categorical feature 'f_cat' is expected to have a cardinality <= 16"
1514
+ hist = HistGradientBoosting(categorical_features="from_dtype", max_bins=16)
1515
+
1516
+ rng = np.random.RandomState(42)
1517
+ f_cat = rng.randint(0, high=100, size=100).astype(str)
1518
+ X_df = _convert_container(
1519
+ f_cat[:, None], dataframe_lib, ["f_cat"], categorical_feature_names=["f_cat"]
1520
+ )
1521
+ y = rng.randint(0, high=2, size=100)
1522
+
1523
+ with pytest.raises(ValueError, match=msg):
1524
+ hist.fit(X_df, y)
1525
+
1526
+
1527
+ @pytest.mark.parametrize("dataframe_lib", ["pandas", "polars"])
1528
+ def test_categorical_different_order_same_model(dataframe_lib):
1529
+ """Check that the order of the categories gives the same model."""
1530
+ pytest.importorskip(dataframe_lib)
1531
+ rng = np.random.RandomState(42)
1532
+ n_samples = 1_000
1533
+ f_ints = rng.randint(low=0, high=2, size=n_samples)
1534
+
1535
+ # Construct a target with some noise
1536
+ y = f_ints.copy()
1537
+ flipped = rng.choice([True, False], size=n_samples, p=[0.1, 0.9])
1538
+ y[flipped] = 1 - y[flipped]
1539
+
1540
+ # Construct two categorical encodings of f_ints: one maps 0 -> A, 1 -> B, the other 0 -> B, 1 -> A
1541
+ f_cat_a_b = np.asarray(["A", "B"])[f_ints]
1542
+ f_cat_b_a = np.asarray(["B", "A"])[f_ints]
1543
+ df_a_b = _convert_container(
1544
+ f_cat_a_b[:, None],
1545
+ dataframe_lib,
1546
+ ["f_cat"],
1547
+ categorical_feature_names=["f_cat"],
1548
+ )
1549
+ df_b_a = _convert_container(
1550
+ f_cat_b_a[:, None],
1551
+ dataframe_lib,
1552
+ ["f_cat"],
1553
+ categorical_feature_names=["f_cat"],
1554
+ )
1555
+
1556
+ hist_a_b = HistGradientBoostingClassifier(
1557
+ categorical_features="from_dtype", random_state=0
1558
+ )
1559
+ hist_b_a = HistGradientBoostingClassifier(
1560
+ categorical_features="from_dtype", random_state=0
1561
+ )
1562
+
1563
+ hist_a_b.fit(df_a_b, y)
1564
+ hist_b_a.fit(df_b_a, y)
1565
+
1566
+ assert len(hist_a_b._predictors) == len(hist_b_a._predictors)
1567
+ for predictor_1, predictor_2 in zip(hist_a_b._predictors, hist_b_a._predictors):
1568
+ assert len(predictor_1[0].nodes) == len(predictor_2[0].nodes)
1569
+
1570
+
1571
+ # TODO(1.6): Remove warning and change default in 1.6
1572
+ def test_categorical_features_warn():
1573
+ """Raise warning when there are categorical features in the input DataFrame.
1574
+
1575
+ This is not tested for polars because polars categories must always be
1576
+ strings and strings can only be handled as categories. Therefore the
1577
+ situation in which a categorical column is currently being treated as
1578
+ numbers and in the future will be treated as categories cannot occur with
1579
+ polars.
1580
+ """
1581
+ pd = pytest.importorskip("pandas")
1582
+ X = pd.DataFrame({"a": pd.Series([1, 2, 3], dtype="category"), "b": [4, 5, 6]})
1583
+ y = [0, 1, 0]
1584
+ hist = HistGradientBoostingClassifier(random_state=0)
1585
+
1586
+ msg = "The categorical_features parameter will change to 'from_dtype' in v1.6"
1587
+ with pytest.warns(FutureWarning, match=msg):
1588
+ hist.fit(X, y)
1589
+
1590
+
1591
+ def get_different_bitness_node_ndarray(node_ndarray):
1592
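+ # np.intp is 32-bit on 32-bit platforms and 64-bit on 64-bit platforms, so
+ # casting the intp-typed fields to the other width simulates a Node array
+ # pickled on a platform with the opposite bitness.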
+ new_dtype_for_indexing_fields = np.int64 if _IS_32BIT else np.int32
1593
+
1594
+ # field names in Node struct with np.intp types (see
1595
+ # sklearn/ensemble/_hist_gradient_boosting/common.pyx)
1596
+ indexing_field_names = ["feature_idx"]
1597
+
1598
+ new_dtype_dict = {
1599
+ name: dtype for name, (dtype, _) in node_ndarray.dtype.fields.items()
1600
+ }
1601
+ for name in indexing_field_names:
1602
+ new_dtype_dict[name] = new_dtype_for_indexing_fields
1603
+
1604
+ new_dtype = np.dtype(
1605
+ {"names": list(new_dtype_dict.keys()), "formats": list(new_dtype_dict.values())}
1606
+ )
1607
+ return node_ndarray.astype(new_dtype, casting="same_kind")
1608
+
1609
+
1610
+ def reduce_predictor_with_different_bitness(predictor):
1611
+ cls, args, state = predictor.__reduce__()
1612
+
1613
+ new_state = state.copy()
1614
+ new_state["nodes"] = get_different_bitness_node_ndarray(new_state["nodes"])
1615
+
1616
+ return (cls, args, new_state)
1617
+
1618
+
1619
+ def test_different_bitness_pickle():
1620
+ X, y = make_classification(random_state=0)
1621
+
1622
+ clf = HistGradientBoostingClassifier(random_state=0, max_depth=3)
1623
+ clf.fit(X, y)
1624
+ score = clf.score(X, y)
1625
+
1626
+ def pickle_dump_with_different_bitness():
1627
+ f = io.BytesIO()
1628
+ p = pickle.Pickler(f)
1629
+ p.dispatch_table = copyreg.dispatch_table.copy()
1630
+ p.dispatch_table[TreePredictor] = reduce_predictor_with_different_bitness
1631
+
1632
+ p.dump(clf)
1633
+ f.seek(0)
1634
+ return f
1635
+
1636
+ # Simulate loading a pickle of the same model trained on a platform with different
1637
+ # bitness than the platform it will be used to make predictions on:
1638
+ new_clf = pickle.load(pickle_dump_with_different_bitness())
1639
+ new_score = new_clf.score(X, y)
1640
+ assert score == pytest.approx(new_score)
1641
+
1642
+
1643
+ def test_different_bitness_joblib_pickle():
1644
+ # Make sure that a platform specific pickle generated on a 64 bit
1645
+ # platform can be converted at pickle load time into an estimator
1646
+ # with Cython code that works with the host's native integer precision
1647
+ # to index nodes in the tree data structure when the host is a 32 bit
1648
+ # platform (and vice versa).
1649
+ #
1650
+ # This is in particular useful to be able to train a model on a 64 bit Linux
1651
+ # server and deploy the model as part of a (32 bit) WASM in-browser
1652
+ # application using pyodide.
1653
+ X, y = make_classification(random_state=0)
1654
+
1655
+ clf = HistGradientBoostingClassifier(random_state=0, max_depth=3)
1656
+ clf.fit(X, y)
1657
+ score = clf.score(X, y)
1658
+
1659
+ def joblib_dump_with_different_bitness():
1660
+ f = io.BytesIO()
1661
+ p = NumpyPickler(f)
1662
+ p.dispatch_table = copyreg.dispatch_table.copy()
1663
+ p.dispatch_table[TreePredictor] = reduce_predictor_with_different_bitness
1664
+
1665
+ p.dump(clf)
1666
+ f.seek(0)
1667
+ return f
1668
+
1669
+ new_clf = joblib.load(joblib_dump_with_different_bitness())
1670
+ new_score = new_clf.score(X, y)
1671
+ assert score == pytest.approx(new_score)
1672
+
1673
+
1674
+ def test_pandas_nullable_dtype():
1675
+ # Non regression test for https://github.com/scikit-learn/scikit-learn/issues/28317
1676
+ pd = pytest.importorskip("pandas")
1677
+
1678
+ rng = np.random.default_rng(0)
1679
+ X = pd.DataFrame({"a": rng.integers(10, size=100)}).astype(pd.Int64Dtype())
1680
+ y = rng.integers(2, size=100)
1681
+
1682
+ clf = HistGradientBoostingClassifier()
1683
+ clf.fit(X, y)