applied-ai-018 commited on
Commit
fdd9207
·
verified ·
1 Parent(s): 23c294a

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. ckpts/universal/global_step40/zero/13.attention.query_key_value.weight/exp_avg.pt +3 -0
  2. ckpts/universal/global_step40/zero/13.attention.query_key_value.weight/fp32.pt +3 -0
  3. ckpts/universal/global_step40/zero/19.post_attention_layernorm.weight/exp_avg.pt +3 -0
  4. ckpts/universal/global_step40/zero/19.post_attention_layernorm.weight/exp_avg_sq.pt +3 -0
  5. ckpts/universal/global_step40/zero/19.post_attention_layernorm.weight/fp32.pt +3 -0
  6. ckpts/universal/global_step40/zero/23.attention.dense.weight/exp_avg_sq.pt +3 -0
  7. ckpts/universal/global_step40/zero/23.attention.dense.weight/fp32.pt +3 -0
  8. venv/lib/python3.10/site-packages/sklearn/ensemble/__pycache__/__init__.cpython-310.pyc +0 -0
  9. venv/lib/python3.10/site-packages/sklearn/ensemble/__pycache__/_bagging.cpython-310.pyc +0 -0
  10. venv/lib/python3.10/site-packages/sklearn/ensemble/__pycache__/_base.cpython-310.pyc +0 -0
  11. venv/lib/python3.10/site-packages/sklearn/ensemble/__pycache__/_forest.cpython-310.pyc +0 -0
  12. venv/lib/python3.10/site-packages/sklearn/ensemble/__pycache__/_gb.cpython-310.pyc +0 -0
  13. venv/lib/python3.10/site-packages/sklearn/ensemble/__pycache__/_iforest.cpython-310.pyc +0 -0
  14. venv/lib/python3.10/site-packages/sklearn/ensemble/__pycache__/_stacking.cpython-310.pyc +0 -0
  15. venv/lib/python3.10/site-packages/sklearn/ensemble/__pycache__/_voting.cpython-310.pyc +0 -0
  16. venv/lib/python3.10/site-packages/sklearn/ensemble/__pycache__/_weight_boosting.cpython-310.pyc +0 -0
  17. venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/__pycache__/__init__.cpython-310.pyc +0 -0
  18. venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/__pycache__/binning.cpython-310.pyc +0 -0
  19. venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/__pycache__/gradient_boosting.cpython-310.pyc +0 -0
  20. venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/__pycache__/grower.cpython-310.pyc +0 -0
  21. venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/__pycache__/predictor.cpython-310.pyc +0 -0
  22. venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/_bitset.pxd +18 -0
  23. venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/_gradient_boosting.cpython-310-x86_64-linux-gnu.so +0 -0
  24. venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/binning.py +321 -0
  25. venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/common.cpython-310-x86_64-linux-gnu.so +0 -0
  26. venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/common.pxd +44 -0
  27. venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/gradient_boosting.py +2270 -0
  28. venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/histogram.cpython-310-x86_64-linux-gnu.so +0 -0
  29. venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/predictor.py +144 -0
  30. venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/splitting.cpython-310-x86_64-linux-gnu.so +0 -0
  31. venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__init__.py +0 -0
  32. venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/__init__.cpython-310.pyc +0 -0
  33. venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_binning.cpython-310.pyc +0 -0
  34. venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_bitset.cpython-310.pyc +0 -0
  35. venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_compare_lightgbm.cpython-310.pyc +0 -0
  36. venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_gradient_boosting.cpython-310.pyc +0 -0
  37. venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_grower.cpython-310.pyc +0 -0
  38. venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_histogram.cpython-310.pyc +0 -0
  39. venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_monotonic_contraints.cpython-310.pyc +0 -0
  40. venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_predictor.cpython-310.pyc +0 -0
  41. venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_splitting.cpython-310.pyc +0 -0
  42. venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_warm_start.cpython-310.pyc +0 -0
  43. venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/test_binning.py +489 -0
  44. venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/test_bitset.py +64 -0
  45. venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/test_compare_lightgbm.py +279 -0
  46. venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/test_gradient_boosting.py +1683 -0
  47. venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/test_grower.py +650 -0
  48. venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/test_histogram.py +239 -0
  49. venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/test_monotonic_contraints.py +435 -0
  50. venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/test_predictor.py +187 -0
ckpts/universal/global_step40/zero/13.attention.query_key_value.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f5f694f7b53a61f0d968ce9e518ae41e89a589a8c3c46b07302fce16bb4f2bc8
3
+ size 50332828
ckpts/universal/global_step40/zero/13.attention.query_key_value.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4b744b93c965ec89d11d736c8cdd1af0c4ae4eb62f7915a1ce6fccba03c6853f
3
+ size 50332749
ckpts/universal/global_step40/zero/19.post_attention_layernorm.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0d79d4989b5ad3458dd6c02e7e41619e8e519e2384e4a24b2b8f15483a869a33
3
+ size 9372
ckpts/universal/global_step40/zero/19.post_attention_layernorm.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:19aac9fdbb249e7ab8da57ca7275753adf58d72f1d5292899fad981ff43e6a4c
3
+ size 9387
ckpts/universal/global_step40/zero/19.post_attention_layernorm.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c75f03de837521794edd7528f417105fc4b6d81ab0862b294e899bcbb0996c2e
3
+ size 9293
ckpts/universal/global_step40/zero/23.attention.dense.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:82a32b5ef592e33ea72014b53b265d66a601c7412f9dcc5abb8aa7d30aff3132
3
+ size 16778411
ckpts/universal/global_step40/zero/23.attention.dense.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dfe9f1cd80247701be0ce8c75c38c50efdc796f882d2538cbb4c6930450055c0
3
+ size 16778317
venv/lib/python3.10/site-packages/sklearn/ensemble/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.25 kB). View file
 
venv/lib/python3.10/site-packages/sklearn/ensemble/__pycache__/_bagging.cpython-310.pyc ADDED
Binary file (35.7 kB). View file
 
venv/lib/python3.10/site-packages/sklearn/ensemble/__pycache__/_base.cpython-310.pyc ADDED
Binary file (10.2 kB). View file
 
venv/lib/python3.10/site-packages/sklearn/ensemble/__pycache__/_forest.cpython-310.pyc ADDED
Binary file (98.8 kB). View file
 
venv/lib/python3.10/site-packages/sklearn/ensemble/__pycache__/_gb.cpython-310.pyc ADDED
Binary file (67.5 kB). View file
 
venv/lib/python3.10/site-packages/sklearn/ensemble/__pycache__/_iforest.cpython-310.pyc ADDED
Binary file (17.3 kB). View file
 
venv/lib/python3.10/site-packages/sklearn/ensemble/__pycache__/_stacking.cpython-310.pyc ADDED
Binary file (35.1 kB). View file
 
venv/lib/python3.10/site-packages/sklearn/ensemble/__pycache__/_voting.cpython-310.pyc ADDED
Binary file (23 kB). View file
 
venv/lib/python3.10/site-packages/sklearn/ensemble/__pycache__/_weight_boosting.cpython-310.pyc ADDED
Binary file (37.9 kB). View file
 
venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (379 Bytes). View file
 
venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/__pycache__/binning.cpython-310.pyc ADDED
Binary file (11 kB). View file
 
venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/__pycache__/gradient_boosting.cpython-310.pyc ADDED
Binary file (65.6 kB). View file
 
venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/__pycache__/grower.cpython-310.pyc ADDED
Binary file (20.1 kB). View file
 
venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/__pycache__/predictor.cpython-310.pyc ADDED
Binary file (4.77 kB). View file
 
venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/_bitset.pxd ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from .common cimport X_BINNED_DTYPE_C
2
+ from .common cimport BITSET_DTYPE_C
3
+ from .common cimport BITSET_INNER_DTYPE_C
4
+ from .common cimport X_DTYPE_C
5
+
6
+ cdef void init_bitset(BITSET_DTYPE_C bitset) noexcept nogil
7
+
8
+ cdef void set_bitset(BITSET_DTYPE_C bitset, X_BINNED_DTYPE_C val) noexcept nogil
9
+
10
+ cdef unsigned char in_bitset(BITSET_DTYPE_C bitset, X_BINNED_DTYPE_C val) noexcept nogil
11
+
12
+ cpdef unsigned char in_bitset_memoryview(const BITSET_INNER_DTYPE_C[:] bitset,
13
+ X_BINNED_DTYPE_C val) noexcept nogil
14
+
15
+ cdef unsigned char in_bitset_2d_memoryview(
16
+ const BITSET_INNER_DTYPE_C [:, :] bitset,
17
+ X_BINNED_DTYPE_C val,
18
+ unsigned int row) noexcept nogil
venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/_gradient_boosting.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (229 kB). View file
 
venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/binning.py ADDED
@@ -0,0 +1,321 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ This module contains the BinMapper class.
3
+
4
+ BinMapper is used for mapping a real-valued dataset into integer-valued bins.
5
+ Bin thresholds are computed with the quantiles so that each bin contains
6
+ approximately the same number of samples.
7
+ """
8
+ # Author: Nicolas Hug
9
+
10
+ import numpy as np
11
+
12
+ from ...base import BaseEstimator, TransformerMixin
13
+ from ...utils import check_array, check_random_state
14
+ from ...utils._openmp_helpers import _openmp_effective_n_threads
15
+ from ...utils.fixes import percentile
16
+ from ...utils.validation import check_is_fitted
17
+ from ._binning import _map_to_bins
18
+ from ._bitset import set_bitset_memoryview
19
+ from .common import ALMOST_INF, X_BINNED_DTYPE, X_BITSET_INNER_DTYPE, X_DTYPE
20
+
21
+
22
+ def _find_binning_thresholds(col_data, max_bins):
23
+ """Extract quantiles from a continuous feature.
24
+
25
+ Missing values are ignored for finding the thresholds.
26
+
27
+ Parameters
28
+ ----------
29
+ col_data : array-like, shape (n_samples,)
30
+ The continuous feature to bin.
31
+ max_bins: int
32
+ The maximum number of bins to use for non-missing values. If for a
33
+ given feature the number of unique values is less than ``max_bins``,
34
+ then those unique values will be used to compute the bin thresholds,
35
+ instead of the quantiles
36
+
37
+ Return
38
+ ------
39
+ binning_thresholds : ndarray of shape(min(max_bins, n_unique_values) - 1,)
40
+ The increasing numeric values that can be used to separate the bins.
41
+ A given value x will be mapped into bin value i iff
42
+ bining_thresholds[i - 1] < x <= binning_thresholds[i]
43
+ """
44
+ # ignore missing values when computing bin thresholds
45
+ missing_mask = np.isnan(col_data)
46
+ if missing_mask.any():
47
+ col_data = col_data[~missing_mask]
48
+ col_data = np.ascontiguousarray(col_data, dtype=X_DTYPE)
49
+ distinct_values = np.unique(col_data)
50
+ if len(distinct_values) <= max_bins:
51
+ midpoints = distinct_values[:-1] + distinct_values[1:]
52
+ midpoints *= 0.5
53
+ else:
54
+ # We sort again the data in this case. We could compute
55
+ # approximate midpoint percentiles using the output of
56
+ # np.unique(col_data, return_counts) instead but this is more
57
+ # work and the performance benefit will be limited because we
58
+ # work on a fixed-size subsample of the full data.
59
+ percentiles = np.linspace(0, 100, num=max_bins + 1)
60
+ percentiles = percentiles[1:-1]
61
+ midpoints = percentile(col_data, percentiles, method="midpoint").astype(X_DTYPE)
62
+ assert midpoints.shape[0] == max_bins - 1
63
+
64
+ # We avoid having +inf thresholds: +inf thresholds are only allowed in
65
+ # a "split on nan" situation.
66
+ np.clip(midpoints, a_min=None, a_max=ALMOST_INF, out=midpoints)
67
+ return midpoints
68
+
69
+
70
+ class _BinMapper(TransformerMixin, BaseEstimator):
71
+ """Transformer that maps a dataset into integer-valued bins.
72
+
73
+ For continuous features, the bins are created in a feature-wise fashion,
74
+ using quantiles so that each bins contains approximately the same number
75
+ of samples. For large datasets, quantiles are computed on a subset of the
76
+ data to speed-up the binning, but the quantiles should remain stable.
77
+
78
+ For categorical features, the raw categorical values are expected to be
79
+ in [0, 254] (this is not validated here though) and each category
80
+ corresponds to a bin. All categorical values must be known at
81
+ initialization: transform() doesn't know how to bin unknown categorical
82
+ values. Note that transform() is only used on non-training data in the
83
+ case of early stopping.
84
+
85
+ Features with a small number of values may be binned into less than
86
+ ``n_bins`` bins. The last bin (at index ``n_bins - 1``) is always reserved
87
+ for missing values.
88
+
89
+ Parameters
90
+ ----------
91
+ n_bins : int, default=256
92
+ The maximum number of bins to use (including the bin for missing
93
+ values). Should be in [3, 256]. Non-missing values are binned on
94
+ ``max_bins = n_bins - 1`` bins. The last bin is always reserved for
95
+ missing values. If for a given feature the number of unique values is
96
+ less than ``max_bins``, then those unique values will be used to
97
+ compute the bin thresholds, instead of the quantiles. For categorical
98
+ features indicated by ``is_categorical``, the docstring for
99
+ ``is_categorical`` details on this procedure.
100
+ subsample : int or None, default=2e5
101
+ If ``n_samples > subsample``, then ``sub_samples`` samples will be
102
+ randomly chosen to compute the quantiles. If ``None``, the whole data
103
+ is used.
104
+ is_categorical : ndarray of bool of shape (n_features,), default=None
105
+ Indicates categorical features. By default, all features are
106
+ considered continuous.
107
+ known_categories : list of {ndarray, None} of shape (n_features,), \
108
+ default=none
109
+ For each categorical feature, the array indicates the set of unique
110
+ categorical values. These should be the possible values over all the
111
+ data, not just the training data. For continuous features, the
112
+ corresponding entry should be None.
113
+ random_state: int, RandomState instance or None, default=None
114
+ Pseudo-random number generator to control the random sub-sampling.
115
+ Pass an int for reproducible output across multiple
116
+ function calls.
117
+ See :term:`Glossary <random_state>`.
118
+ n_threads : int, default=None
119
+ Number of OpenMP threads to use. `_openmp_effective_n_threads` is called
120
+ to determine the effective number of threads use, which takes cgroups CPU
121
+ quotes into account. See the docstring of `_openmp_effective_n_threads`
122
+ for details.
123
+
124
+ Attributes
125
+ ----------
126
+ bin_thresholds_ : list of ndarray
127
+ For each feature, each array indicates how to map a feature into a
128
+ binned feature. The semantic and size depends on the nature of the
129
+ feature:
130
+ - for real-valued features, the array corresponds to the real-valued
131
+ bin thresholds (the upper bound of each bin). There are ``max_bins
132
+ - 1`` thresholds, where ``max_bins = n_bins - 1`` is the number of
133
+ bins used for non-missing values.
134
+ - for categorical features, the array is a map from a binned category
135
+ value to the raw category value. The size of the array is equal to
136
+ ``min(max_bins, category_cardinality)`` where we ignore missing
137
+ values in the cardinality.
138
+ n_bins_non_missing_ : ndarray, dtype=np.uint32
139
+ For each feature, gives the number of bins actually used for
140
+ non-missing values. For features with a lot of unique values, this is
141
+ equal to ``n_bins - 1``.
142
+ is_categorical_ : ndarray of shape (n_features,), dtype=np.uint8
143
+ Indicator for categorical features.
144
+ missing_values_bin_idx_ : np.uint8
145
+ The index of the bin where missing values are mapped. This is a
146
+ constant across all features. This corresponds to the last bin, and
147
+ it is always equal to ``n_bins - 1``. Note that if ``n_bins_non_missing_``
148
+ is less than ``n_bins - 1`` for a given feature, then there are
149
+ empty (and unused) bins.
150
+ """
151
+
152
+ def __init__(
153
+ self,
154
+ n_bins=256,
155
+ subsample=int(2e5),
156
+ is_categorical=None,
157
+ known_categories=None,
158
+ random_state=None,
159
+ n_threads=None,
160
+ ):
161
+ self.n_bins = n_bins
162
+ self.subsample = subsample
163
+ self.is_categorical = is_categorical
164
+ self.known_categories = known_categories
165
+ self.random_state = random_state
166
+ self.n_threads = n_threads
167
+
168
+ def fit(self, X, y=None):
169
+ """Fit data X by computing the binning thresholds.
170
+
171
+ The last bin is reserved for missing values, whether missing values
172
+ are present in the data or not.
173
+
174
+ Parameters
175
+ ----------
176
+ X : array-like of shape (n_samples, n_features)
177
+ The data to bin.
178
+ y: None
179
+ Ignored.
180
+
181
+ Returns
182
+ -------
183
+ self : object
184
+ """
185
+ if not (3 <= self.n_bins <= 256):
186
+ # min is 3: at least 2 distinct bins and a missing values bin
187
+ raise ValueError(
188
+ "n_bins={} should be no smaller than 3 and no larger than 256.".format(
189
+ self.n_bins
190
+ )
191
+ )
192
+
193
+ X = check_array(X, dtype=[X_DTYPE], force_all_finite=False)
194
+ max_bins = self.n_bins - 1
195
+
196
+ rng = check_random_state(self.random_state)
197
+ if self.subsample is not None and X.shape[0] > self.subsample:
198
+ subset = rng.choice(X.shape[0], self.subsample, replace=False)
199
+ X = X.take(subset, axis=0)
200
+
201
+ if self.is_categorical is None:
202
+ self.is_categorical_ = np.zeros(X.shape[1], dtype=np.uint8)
203
+ else:
204
+ self.is_categorical_ = np.asarray(self.is_categorical, dtype=np.uint8)
205
+
206
+ n_features = X.shape[1]
207
+ known_categories = self.known_categories
208
+ if known_categories is None:
209
+ known_categories = [None] * n_features
210
+
211
+ # validate is_categorical and known_categories parameters
212
+ for f_idx in range(n_features):
213
+ is_categorical = self.is_categorical_[f_idx]
214
+ known_cats = known_categories[f_idx]
215
+ if is_categorical and known_cats is None:
216
+ raise ValueError(
217
+ f"Known categories for feature {f_idx} must be provided."
218
+ )
219
+ if not is_categorical and known_cats is not None:
220
+ raise ValueError(
221
+ f"Feature {f_idx} isn't marked as a categorical feature, "
222
+ "but categories were passed."
223
+ )
224
+
225
+ self.missing_values_bin_idx_ = self.n_bins - 1
226
+
227
+ self.bin_thresholds_ = []
228
+ n_bins_non_missing = []
229
+
230
+ for f_idx in range(n_features):
231
+ if not self.is_categorical_[f_idx]:
232
+ thresholds = _find_binning_thresholds(X[:, f_idx], max_bins)
233
+ n_bins_non_missing.append(thresholds.shape[0] + 1)
234
+ else:
235
+ # Since categories are assumed to be encoded in
236
+ # [0, n_cats] and since n_cats <= max_bins,
237
+ # the thresholds *are* the unique categorical values. This will
238
+ # lead to the correct mapping in transform()
239
+ thresholds = known_categories[f_idx]
240
+ n_bins_non_missing.append(thresholds.shape[0])
241
+
242
+ self.bin_thresholds_.append(thresholds)
243
+
244
+ self.n_bins_non_missing_ = np.array(n_bins_non_missing, dtype=np.uint32)
245
+ return self
246
+
247
+ def transform(self, X):
248
+ """Bin data X.
249
+
250
+ Missing values will be mapped to the last bin.
251
+
252
+ For categorical features, the mapping will be incorrect for unknown
253
+ categories. Since the BinMapper is given known_categories of the
254
+ entire training data (i.e. before the call to train_test_split() in
255
+ case of early-stopping), this never happens.
256
+
257
+ Parameters
258
+ ----------
259
+ X : array-like of shape (n_samples, n_features)
260
+ The data to bin.
261
+
262
+ Returns
263
+ -------
264
+ X_binned : array-like of shape (n_samples, n_features)
265
+ The binned data (fortran-aligned).
266
+ """
267
+ X = check_array(X, dtype=[X_DTYPE], force_all_finite=False)
268
+ check_is_fitted(self)
269
+ if X.shape[1] != self.n_bins_non_missing_.shape[0]:
270
+ raise ValueError(
271
+ "This estimator was fitted with {} features but {} got passed "
272
+ "to transform()".format(self.n_bins_non_missing_.shape[0], X.shape[1])
273
+ )
274
+
275
+ n_threads = _openmp_effective_n_threads(self.n_threads)
276
+ binned = np.zeros_like(X, dtype=X_BINNED_DTYPE, order="F")
277
+ _map_to_bins(
278
+ X,
279
+ self.bin_thresholds_,
280
+ self.is_categorical_,
281
+ self.missing_values_bin_idx_,
282
+ n_threads,
283
+ binned,
284
+ )
285
+ return binned
286
+
287
+ def make_known_categories_bitsets(self):
288
+ """Create bitsets of known categories.
289
+
290
+ Returns
291
+ -------
292
+ - known_cat_bitsets : ndarray of shape (n_categorical_features, 8)
293
+ Array of bitsets of known categories, for each categorical feature.
294
+ - f_idx_map : ndarray of shape (n_features,)
295
+ Map from original feature index to the corresponding index in the
296
+ known_cat_bitsets array.
297
+ """
298
+
299
+ categorical_features_indices = np.flatnonzero(self.is_categorical_)
300
+
301
+ n_features = self.is_categorical_.size
302
+ n_categorical_features = categorical_features_indices.size
303
+
304
+ f_idx_map = np.zeros(n_features, dtype=np.uint32)
305
+ f_idx_map[categorical_features_indices] = np.arange(
306
+ n_categorical_features, dtype=np.uint32
307
+ )
308
+
309
+ known_categories = self.bin_thresholds_
310
+
311
+ known_cat_bitsets = np.zeros(
312
+ (n_categorical_features, 8), dtype=X_BITSET_INNER_DTYPE
313
+ )
314
+
315
+ # TODO: complexity is O(n_categorical_features * 255). Maybe this is
316
+ # worth cythonizing
317
+ for mapped_f_idx, f_idx in enumerate(categorical_features_indices):
318
+ for raw_cat_val in known_categories[f_idx]:
319
+ set_bitset_memoryview(known_cat_bitsets[mapped_f_idx], raw_cat_val)
320
+
321
+ return known_cat_bitsets, f_idx_map
venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/common.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (146 kB). View file
 
venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/common.pxd ADDED
@@ -0,0 +1,44 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ cimport numpy as cnp
2
+ from sklearn.utils._typedefs cimport intp_t
3
+
4
+ cnp.import_array()
5
+
6
+
7
+ ctypedef cnp.npy_float64 X_DTYPE_C
8
+ ctypedef cnp.npy_uint8 X_BINNED_DTYPE_C
9
+ ctypedef cnp.npy_float64 Y_DTYPE_C
10
+ ctypedef cnp.npy_float32 G_H_DTYPE_C
11
+ ctypedef cnp.npy_uint32 BITSET_INNER_DTYPE_C
12
+ ctypedef BITSET_INNER_DTYPE_C[8] BITSET_DTYPE_C
13
+
14
+ cdef packed struct hist_struct:
15
+ # Same as histogram dtype but we need a struct to declare views. It needs
16
+ # to be packed since by default numpy dtypes aren't aligned
17
+ Y_DTYPE_C sum_gradients
18
+ Y_DTYPE_C sum_hessians
19
+ unsigned int count
20
+
21
+
22
+ cdef packed struct node_struct:
23
+ # Equivalent struct to PREDICTOR_RECORD_DTYPE to use in memory views. It
24
+ # needs to be packed since by default numpy dtypes aren't aligned
25
+ Y_DTYPE_C value
26
+ unsigned int count
27
+ intp_t feature_idx
28
+ X_DTYPE_C num_threshold
29
+ unsigned char missing_go_to_left
30
+ unsigned int left
31
+ unsigned int right
32
+ Y_DTYPE_C gain
33
+ unsigned int depth
34
+ unsigned char is_leaf
35
+ X_BINNED_DTYPE_C bin_threshold
36
+ unsigned char is_categorical
37
+ # The index of the corresponding bitsets in the Predictor's bitset arrays.
38
+ # Only used if is_categorical is True
39
+ unsigned int bitset_idx
40
+
41
+ cpdef enum MonotonicConstraint:
42
+ NO_CST = 0
43
+ POS = 1
44
+ NEG = -1
venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/gradient_boosting.py ADDED
@@ -0,0 +1,2270 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Fast Gradient Boosting decision trees for classification and regression."""
2
+
3
+ # Author: Nicolas Hug
4
+
5
+ import itertools
6
+ import warnings
7
+ from abc import ABC, abstractmethod
8
+ from contextlib import contextmanager, nullcontext, suppress
9
+ from functools import partial
10
+ from numbers import Integral, Real
11
+ from time import time
12
+
13
+ import numpy as np
14
+
15
+ from ..._loss.loss import (
16
+ _LOSSES,
17
+ BaseLoss,
18
+ HalfBinomialLoss,
19
+ HalfGammaLoss,
20
+ HalfMultinomialLoss,
21
+ HalfPoissonLoss,
22
+ PinballLoss,
23
+ )
24
+ from ...base import (
25
+ BaseEstimator,
26
+ ClassifierMixin,
27
+ RegressorMixin,
28
+ _fit_context,
29
+ is_classifier,
30
+ )
31
+ from ...compose import ColumnTransformer
32
+ from ...metrics import check_scoring
33
+ from ...metrics._scorer import _SCORERS
34
+ from ...model_selection import train_test_split
35
+ from ...preprocessing import FunctionTransformer, LabelEncoder, OrdinalEncoder
36
+ from ...utils import check_random_state, compute_sample_weight, is_scalar_nan, resample
37
+ from ...utils._openmp_helpers import _openmp_effective_n_threads
38
+ from ...utils._param_validation import Hidden, Interval, RealNotInt, StrOptions
39
+ from ...utils.multiclass import check_classification_targets
40
+ from ...utils.validation import (
41
+ _check_monotonic_cst,
42
+ _check_sample_weight,
43
+ _check_y,
44
+ _is_pandas_df,
45
+ check_array,
46
+ check_consistent_length,
47
+ check_is_fitted,
48
+ )
49
+ from ._gradient_boosting import _update_raw_predictions
50
+ from .binning import _BinMapper
51
+ from .common import G_H_DTYPE, X_DTYPE, Y_DTYPE
52
+ from .grower import TreeGrower
53
+
54
+ _LOSSES = _LOSSES.copy()
55
+ _LOSSES.update(
56
+ {
57
+ "poisson": HalfPoissonLoss,
58
+ "gamma": HalfGammaLoss,
59
+ "quantile": PinballLoss,
60
+ }
61
+ )
62
+
63
+
64
+ def _update_leaves_values(loss, grower, y_true, raw_prediction, sample_weight):
65
+ """Update the leaf values to be predicted by the tree.
66
+
67
+ Update equals:
68
+ loss.fit_intercept_only(y_true - raw_prediction)
69
+
70
+ This is only applied if loss.differentiable is False.
71
+ Note: It only works, if the loss is a function of the residual, as is the
72
+ case for AbsoluteError and PinballLoss. Otherwise, one would need to get
73
+ the minimum of loss(y_true, raw_prediction + x) in x. A few examples:
74
+ - AbsoluteError: median(y_true - raw_prediction).
75
+ - PinballLoss: quantile(y_true - raw_prediction).
76
+
77
+ More background:
78
+ For the standard gradient descent method according to "Greedy Function
79
+ Approximation: A Gradient Boosting Machine" by Friedman, all loss functions but the
80
+ squared loss need a line search step. BaseHistGradientBoosting, however, implements
81
+ a so called Newton boosting where the trees are fitted to a 2nd order
82
+ approximations of the loss in terms of gradients and hessians. In this case, the
83
+ line search step is only necessary if the loss is not smooth, i.e. not
84
+ differentiable, which renders the 2nd order approximation invalid. In fact,
85
+ non-smooth losses arbitrarily set hessians to 1 and effectively use the standard
86
+ gradient descent method with line search.
87
+ """
88
+ # TODO: Ideally this should be computed in parallel over the leaves using something
89
+ # similar to _update_raw_predictions(), but this requires a cython version of
90
+ # median().
91
+ for leaf in grower.finalized_leaves:
92
+ indices = leaf.sample_indices
93
+ if sample_weight is None:
94
+ sw = None
95
+ else:
96
+ sw = sample_weight[indices]
97
+ update = loss.fit_intercept_only(
98
+ y_true=y_true[indices] - raw_prediction[indices],
99
+ sample_weight=sw,
100
+ )
101
+ leaf.value = grower.shrinkage * update
102
+ # Note that the regularization is ignored here
103
+
104
+
105
+ @contextmanager
106
+ def _patch_raw_predict(estimator, raw_predictions):
107
+ """Context manager that patches _raw_predict to return raw_predictions.
108
+
109
+ `raw_predictions` is typically a precomputed array to avoid redundant
110
+ state-wise computations fitting with early stopping enabled: in this case
111
+ `raw_predictions` is incrementally updated whenever we add a tree to the
112
+ boosted ensemble.
113
+
114
+ Note: this makes fitting HistGradientBoosting* models inherently non thread
115
+ safe at fit time. However thread-safety at fit time was never guaranteed nor
116
+ enforced for scikit-learn estimators in general.
117
+
118
+ Thread-safety at prediction/transform time is another matter as those
119
+ operations are typically side-effect free and therefore often thread-safe by
120
+ default for most scikit-learn models and would like to keep it that way.
121
+ Therefore this context manager should only be used at fit time.
122
+
123
+ TODO: in the future, we could explore the possibility to extend the scorer
124
+ public API to expose a way to compute vales from raw predictions. That would
125
+ probably require also making the scorer aware of the inverse link function
126
+ used by the estimator which is typically private API for now, hence the need
127
+ for this patching mechanism.
128
+ """
129
+ orig_raw_predict = estimator._raw_predict
130
+
131
+ def _patched_raw_predicts(*args, **kwargs):
132
+ return raw_predictions
133
+
134
+ estimator._raw_predict = _patched_raw_predicts
135
+ yield estimator
136
+ estimator._raw_predict = orig_raw_predict
137
+
138
+
139
+ class BaseHistGradientBoosting(BaseEstimator, ABC):
140
+ """Base class for histogram-based gradient boosting estimators."""
141
+
142
+ _parameter_constraints: dict = {
143
+ "loss": [BaseLoss],
144
+ "learning_rate": [Interval(Real, 0, None, closed="neither")],
145
+ "max_iter": [Interval(Integral, 1, None, closed="left")],
146
+ "max_leaf_nodes": [Interval(Integral, 2, None, closed="left"), None],
147
+ "max_depth": [Interval(Integral, 1, None, closed="left"), None],
148
+ "min_samples_leaf": [Interval(Integral, 1, None, closed="left")],
149
+ "l2_regularization": [Interval(Real, 0, None, closed="left")],
150
+ "max_features": [Interval(RealNotInt, 0, 1, closed="right")],
151
+ "monotonic_cst": ["array-like", dict, None],
152
+ "interaction_cst": [
153
+ list,
154
+ tuple,
155
+ StrOptions({"pairwise", "no_interactions"}),
156
+ None,
157
+ ],
158
+ "n_iter_no_change": [Interval(Integral, 1, None, closed="left")],
159
+ "validation_fraction": [
160
+ Interval(RealNotInt, 0, 1, closed="neither"),
161
+ Interval(Integral, 1, None, closed="left"),
162
+ None,
163
+ ],
164
+ "tol": [Interval(Real, 0, None, closed="left")],
165
+ "max_bins": [Interval(Integral, 2, 255, closed="both")],
166
+ "categorical_features": [
167
+ "array-like",
168
+ StrOptions({"from_dtype"}),
169
+ Hidden(StrOptions({"warn"})),
170
+ None,
171
+ ],
172
+ "warm_start": ["boolean"],
173
+ "early_stopping": [StrOptions({"auto"}), "boolean"],
174
+ "scoring": [str, callable, None],
175
+ "verbose": ["verbose"],
176
+ "random_state": ["random_state"],
177
+ }
178
+
179
+ @abstractmethod
180
+ def __init__(
181
+ self,
182
+ loss,
183
+ *,
184
+ learning_rate,
185
+ max_iter,
186
+ max_leaf_nodes,
187
+ max_depth,
188
+ min_samples_leaf,
189
+ l2_regularization,
190
+ max_features,
191
+ max_bins,
192
+ categorical_features,
193
+ monotonic_cst,
194
+ interaction_cst,
195
+ warm_start,
196
+ early_stopping,
197
+ scoring,
198
+ validation_fraction,
199
+ n_iter_no_change,
200
+ tol,
201
+ verbose,
202
+ random_state,
203
+ ):
204
+ self.loss = loss
205
+ self.learning_rate = learning_rate
206
+ self.max_iter = max_iter
207
+ self.max_leaf_nodes = max_leaf_nodes
208
+ self.max_depth = max_depth
209
+ self.min_samples_leaf = min_samples_leaf
210
+ self.l2_regularization = l2_regularization
211
+ self.max_features = max_features
212
+ self.max_bins = max_bins
213
+ self.monotonic_cst = monotonic_cst
214
+ self.interaction_cst = interaction_cst
215
+ self.categorical_features = categorical_features
216
+ self.warm_start = warm_start
217
+ self.early_stopping = early_stopping
218
+ self.scoring = scoring
219
+ self.validation_fraction = validation_fraction
220
+ self.n_iter_no_change = n_iter_no_change
221
+ self.tol = tol
222
+ self.verbose = verbose
223
+ self.random_state = random_state
224
+
225
+ def _validate_parameters(self):
226
+ """Validate parameters passed to __init__.
227
+
228
+ The parameters that are directly passed to the grower are checked in
229
+ TreeGrower."""
230
+ if self.monotonic_cst is not None and self.n_trees_per_iteration_ != 1:
231
+ raise ValueError(
232
+ "monotonic constraints are not supported for multiclass classification."
233
+ )
234
+
235
+ def _finalize_sample_weight(self, sample_weight, y):
236
+ """Finalize sample weight.
237
+
238
+ Used by subclasses to adjust sample_weights. This is useful for implementing
239
+ class weights.
240
+ """
241
+ return sample_weight
242
+
243
+ def _preprocess_X(self, X, *, reset):
244
+ """Preprocess and validate X.
245
+
246
+ Parameters
247
+ ----------
248
+ X : {array-like, pandas DataFrame} of shape (n_samples, n_features)
249
+ Input data.
250
+
251
+ reset : bool
252
+ Whether to reset the `n_features_in_` and `feature_names_in_ attributes.
253
+
254
+ Returns
255
+ -------
256
+ X : ndarray of shape (n_samples, n_features)
257
+ Validated input data.
258
+
259
+ known_categories : list of ndarray of shape (n_categories,)
260
+ List of known categories for each categorical feature.
261
+ """
262
+ # If there is a preprocessor, we let the preprocessor handle the validation.
263
+ # Otherwise, we validate the data ourselves.
264
+ check_X_kwargs = dict(dtype=[X_DTYPE], force_all_finite=False)
265
+ if not reset:
266
+ if self._preprocessor is None:
267
+ return self._validate_data(X, reset=False, **check_X_kwargs)
268
+ return self._preprocessor.transform(X)
269
+
270
+ # At this point, reset is False, which runs during `fit`.
271
+ self.is_categorical_ = self._check_categorical_features(X)
272
+
273
+ if self.is_categorical_ is None:
274
+ self._preprocessor = None
275
+ self._is_categorical_remapped = None
276
+
277
+ X = self._validate_data(X, **check_X_kwargs)
278
+ return X, None
279
+
280
+ n_features = X.shape[1]
281
+ ordinal_encoder = OrdinalEncoder(
282
+ categories="auto",
283
+ handle_unknown="use_encoded_value",
284
+ unknown_value=np.nan,
285
+ encoded_missing_value=np.nan,
286
+ dtype=X_DTYPE,
287
+ )
288
+
289
+ check_X = partial(check_array, **check_X_kwargs)
290
+ numerical_preprocessor = FunctionTransformer(check_X)
291
+ self._preprocessor = ColumnTransformer(
292
+ [
293
+ ("encoder", ordinal_encoder, self.is_categorical_),
294
+ ("numerical", numerical_preprocessor, ~self.is_categorical_),
295
+ ]
296
+ )
297
+ self._preprocessor.set_output(transform="default")
298
+ X = self._preprocessor.fit_transform(X)
299
+ # check categories found by the OrdinalEncoder and get their encoded values
300
+ known_categories = self._check_categories()
301
+ self.n_features_in_ = self._preprocessor.n_features_in_
302
+ with suppress(AttributeError):
303
+ self.feature_names_in_ = self._preprocessor.feature_names_in_
304
+
305
+ # The ColumnTransformer's output places the categorical features at the
306
+ # beginning
307
+ categorical_remapped = np.zeros(n_features, dtype=bool)
308
+ categorical_remapped[self._preprocessor.output_indices_["encoder"]] = True
309
+ self._is_categorical_remapped = categorical_remapped
310
+
311
+ return X, known_categories
312
+
313
+ def _check_categories(self):
314
+ """Check categories found by the preprocessor and return their encoded values.
315
+
316
+ Returns a list of length ``self.n_features_in_``, with one entry per
317
+ input feature.
318
+
319
+ For non-categorical features, the corresponding entry is ``None``.
320
+
321
+ For categorical features, the corresponding entry is an array
322
+ containing the categories as encoded by the preprocessor (an
323
+ ``OrdinalEncoder``), excluding missing values. The entry is therefore
324
+ ``np.arange(n_categories)`` where ``n_categories`` is the number of
325
+ unique values in the considered feature column, after removing missing
326
+ values.
327
+
328
+ If ``n_categories > self.max_bins`` for any feature, a ``ValueError``
329
+ is raised.
330
+ """
331
+ encoder = self._preprocessor.named_transformers_["encoder"]
332
+ known_categories = [None] * self._preprocessor.n_features_in_
333
+ categorical_column_indices = np.arange(self._preprocessor.n_features_in_)[
334
+ self._preprocessor.output_indices_["encoder"]
335
+ ]
336
+ for feature_idx, categories in zip(
337
+ categorical_column_indices, encoder.categories_
338
+ ):
339
+ # OrdinalEncoder always puts np.nan as the last category if the
340
+ # training data has missing values. Here we remove it because it is
341
+ # already added by the _BinMapper.
342
+ if len(categories) and is_scalar_nan(categories[-1]):
343
+ categories = categories[:-1]
344
+ if categories.size > self.max_bins:
345
+ try:
346
+ feature_name = repr(encoder.feature_names_in_[feature_idx])
347
+ except AttributeError:
348
+ feature_name = f"at index {feature_idx}"
349
+ raise ValueError(
350
+ f"Categorical feature {feature_name} is expected to "
351
+ f"have a cardinality <= {self.max_bins} but actually "
352
+ f"has a cardinality of {categories.size}."
353
+ )
354
+ known_categories[feature_idx] = np.arange(len(categories), dtype=X_DTYPE)
355
+ return known_categories
356
+
357
+ def _check_categorical_features(self, X):
358
+ """Check and validate categorical features in X
359
+
360
+ Parameters
361
+ ----------
362
+ X : {array-like, pandas DataFrame} of shape (n_samples, n_features)
363
+ Input data.
364
+
365
+ Return
366
+ ------
367
+ is_categorical : ndarray of shape (n_features,) or None, dtype=bool
368
+ Indicates whether a feature is categorical. If no feature is
369
+ categorical, this is None.
370
+ """
371
+ # Special code for pandas because of a bug in recent pandas, which is
372
+ # fixed in main and maybe included in 2.2.1, see
373
+ # https://github.com/pandas-dev/pandas/pull/57173.
374
+ # Also pandas versions < 1.5.1 do not support the dataframe interchange
375
+ if _is_pandas_df(X):
376
+ X_is_dataframe = True
377
+ categorical_columns_mask = np.asarray(X.dtypes == "category")
378
+ X_has_categorical_columns = categorical_columns_mask.any()
379
+ elif hasattr(X, "__dataframe__"):
380
+ X_is_dataframe = True
381
+ categorical_columns_mask = np.asarray(
382
+ [
383
+ c.dtype[0].name == "CATEGORICAL"
384
+ for c in X.__dataframe__().get_columns()
385
+ ]
386
+ )
387
+ X_has_categorical_columns = categorical_columns_mask.any()
388
+ else:
389
+ X_is_dataframe = False
390
+ categorical_columns_mask = None
391
+ X_has_categorical_columns = False
392
+
393
+ # TODO(1.6): Remove warning and change default to "from_dtype" in v1.6
394
+ if (
395
+ isinstance(self.categorical_features, str)
396
+ and self.categorical_features == "warn"
397
+ ):
398
+ if X_has_categorical_columns:
399
+ warnings.warn(
400
+ (
401
+ "The categorical_features parameter will change to 'from_dtype'"
402
+ " in v1.6. The 'from_dtype' option automatically treats"
403
+ " categorical dtypes in a DataFrame as categorical features."
404
+ ),
405
+ FutureWarning,
406
+ )
407
+ categorical_features = None
408
+ else:
409
+ categorical_features = self.categorical_features
410
+
411
+ categorical_by_dtype = (
412
+ isinstance(categorical_features, str)
413
+ and categorical_features == "from_dtype"
414
+ )
415
+ no_categorical_dtype = categorical_features is None or (
416
+ categorical_by_dtype and not X_is_dataframe
417
+ )
418
+
419
+ if no_categorical_dtype:
420
+ return None
421
+
422
+ use_pandas_categorical = categorical_by_dtype and X_is_dataframe
423
+ if use_pandas_categorical:
424
+ categorical_features = categorical_columns_mask
425
+ else:
426
+ categorical_features = np.asarray(categorical_features)
427
+
428
+ if categorical_features.size == 0:
429
+ return None
430
+
431
+ if categorical_features.dtype.kind not in ("i", "b", "U", "O"):
432
+ raise ValueError(
433
+ "categorical_features must be an array-like of bool, int or "
434
+ f"str, got: {categorical_features.dtype.name}."
435
+ )
436
+
437
+ if categorical_features.dtype.kind == "O":
438
+ types = set(type(f) for f in categorical_features)
439
+ if types != {str}:
440
+ raise ValueError(
441
+ "categorical_features must be an array-like of bool, int or "
442
+ f"str, got: {', '.join(sorted(t.__name__ for t in types))}."
443
+ )
444
+
445
+ n_features = X.shape[1]
446
+ # At this point `_validate_data` was not called yet because we want to use the
447
+ # dtypes are used to discover the categorical features. Thus `feature_names_in_`
448
+ # is not defined yet.
449
+ feature_names_in_ = getattr(X, "columns", None)
450
+
451
+ if categorical_features.dtype.kind in ("U", "O"):
452
+ # check for feature names
453
+ if feature_names_in_ is None:
454
+ raise ValueError(
455
+ "categorical_features should be passed as an array of "
456
+ "integers or as a boolean mask when the model is fitted "
457
+ "on data without feature names."
458
+ )
459
+ is_categorical = np.zeros(n_features, dtype=bool)
460
+ feature_names = list(feature_names_in_)
461
+ for feature_name in categorical_features:
462
+ try:
463
+ is_categorical[feature_names.index(feature_name)] = True
464
+ except ValueError as e:
465
+ raise ValueError(
466
+ f"categorical_features has a item value '{feature_name}' "
467
+ "which is not a valid feature name of the training "
468
+ f"data. Observed feature names: {feature_names}"
469
+ ) from e
470
+ elif categorical_features.dtype.kind == "i":
471
+ # check for categorical features as indices
472
+ if (
473
+ np.max(categorical_features) >= n_features
474
+ or np.min(categorical_features) < 0
475
+ ):
476
+ raise ValueError(
477
+ "categorical_features set as integer "
478
+ "indices must be in [0, n_features - 1]"
479
+ )
480
+ is_categorical = np.zeros(n_features, dtype=bool)
481
+ is_categorical[categorical_features] = True
482
+ else:
483
+ if categorical_features.shape[0] != n_features:
484
+ raise ValueError(
485
+ "categorical_features set as a boolean mask "
486
+ "must have shape (n_features,), got: "
487
+ f"{categorical_features.shape}"
488
+ )
489
+ is_categorical = categorical_features
490
+
491
+ if not np.any(is_categorical):
492
+ return None
493
+ return is_categorical
494
+
495
+ def _check_interaction_cst(self, n_features):
496
+ """Check and validation for interaction constraints."""
497
+ if self.interaction_cst is None:
498
+ return None
499
+
500
+ if self.interaction_cst == "no_interactions":
501
+ interaction_cst = [[i] for i in range(n_features)]
502
+ elif self.interaction_cst == "pairwise":
503
+ interaction_cst = itertools.combinations(range(n_features), 2)
504
+ else:
505
+ interaction_cst = self.interaction_cst
506
+
507
+ try:
508
+ constraints = [set(group) for group in interaction_cst]
509
+ except TypeError:
510
+ raise ValueError(
511
+ "Interaction constraints must be a sequence of tuples or lists, got:"
512
+ f" {self.interaction_cst!r}."
513
+ )
514
+
515
+ for group in constraints:
516
+ for x in group:
517
+ if not (isinstance(x, Integral) and 0 <= x < n_features):
518
+ raise ValueError(
519
+ "Interaction constraints must consist of integer indices in"
520
+ f" [0, n_features - 1] = [0, {n_features - 1}], specifying the"
521
+ " position of features, got invalid indices:"
522
+ f" {group!r}"
523
+ )
524
+
525
+ # Add all not listed features as own group by default.
526
+ rest = set(range(n_features)) - set().union(*constraints)
527
+ if len(rest) > 0:
528
+ constraints.append(rest)
529
+
530
+ return constraints
531
+
532
+ @_fit_context(prefer_skip_nested_validation=True)
533
+ def fit(self, X, y, sample_weight=None):
534
+ """Fit the gradient boosting model.
535
+
536
+ Parameters
537
+ ----------
538
+ X : array-like of shape (n_samples, n_features)
539
+ The input samples.
540
+
541
+ y : array-like of shape (n_samples,)
542
+ Target values.
543
+
544
+ sample_weight : array-like of shape (n_samples,) default=None
545
+ Weights of training data.
546
+
547
+ .. versionadded:: 0.23
548
+
549
+ Returns
550
+ -------
551
+ self : object
552
+ Fitted estimator.
553
+ """
554
+ fit_start_time = time()
555
+ acc_find_split_time = 0.0 # time spent finding the best splits
556
+ acc_apply_split_time = 0.0 # time spent splitting nodes
557
+ acc_compute_hist_time = 0.0 # time spent computing histograms
558
+ # time spent predicting X for gradient and hessians update
559
+ acc_prediction_time = 0.0
560
+ X, known_categories = self._preprocess_X(X, reset=True)
561
+ y = _check_y(y, estimator=self)
562
+ y = self._encode_y(y)
563
+ check_consistent_length(X, y)
564
+ # Do not create unit sample weights by default to later skip some
565
+ # computation
566
+ if sample_weight is not None:
567
+ sample_weight = _check_sample_weight(sample_weight, X, dtype=np.float64)
568
+ # TODO: remove when PDP supports sample weights
569
+ self._fitted_with_sw = True
570
+
571
+ sample_weight = self._finalize_sample_weight(sample_weight, y)
572
+
573
+ rng = check_random_state(self.random_state)
574
+
575
+ # When warm starting, we want to reuse the same seed that was used
576
+ # the first time fit was called (e.g. train/val split).
577
+ # For feature subsampling, we want to continue with the rng we started with.
578
+ if not self.warm_start or not self._is_fitted():
579
+ self._random_seed = rng.randint(np.iinfo(np.uint32).max, dtype="u8")
580
+ feature_subsample_seed = rng.randint(np.iinfo(np.uint32).max, dtype="u8")
581
+ self._feature_subsample_rng = np.random.default_rng(feature_subsample_seed)
582
+
583
+ self._validate_parameters()
584
+ monotonic_cst = _check_monotonic_cst(self, self.monotonic_cst)
585
+
586
+ # used for validation in predict
587
+ n_samples, self._n_features = X.shape
588
+
589
+ # Encode constraints into a list of sets of features indices (integers).
590
+ interaction_cst = self._check_interaction_cst(self._n_features)
591
+
592
+ # we need this stateful variable to tell raw_predict() that it was
593
+ # called from fit() (this current method), and that the data it has
594
+ # received is pre-binned.
595
+ # predicting is faster on pre-binned data, so we want early stopping
596
+ # predictions to be made on pre-binned data. Unfortunately the _scorer
597
+ # can only call predict() or predict_proba(), not raw_predict(), and
598
+ # there's no way to tell the scorer that it needs to predict binned
599
+ # data.
600
+ self._in_fit = True
601
+
602
+ # `_openmp_effective_n_threads` is used to take cgroups CPU quotes
603
+ # into account when determine the maximum number of threads to use.
604
+ n_threads = _openmp_effective_n_threads()
605
+
606
+ if isinstance(self.loss, str):
607
+ self._loss = self._get_loss(sample_weight=sample_weight)
608
+ elif isinstance(self.loss, BaseLoss):
609
+ self._loss = self.loss
610
+
611
+ if self.early_stopping == "auto":
612
+ self.do_early_stopping_ = n_samples > 10000
613
+ else:
614
+ self.do_early_stopping_ = self.early_stopping
615
+
616
+ # create validation data if needed
617
+ self._use_validation_data = self.validation_fraction is not None
618
+ if self.do_early_stopping_ and self._use_validation_data:
619
+ # stratify for classification
620
+ # instead of checking predict_proba, loss.n_classes >= 2 would also work
621
+ stratify = y if hasattr(self._loss, "predict_proba") else None
622
+
623
+ # Save the state of the RNG for the training and validation split.
624
+ # This is needed in order to have the same split when using
625
+ # warm starting.
626
+
627
+ if sample_weight is None:
628
+ X_train, X_val, y_train, y_val = train_test_split(
629
+ X,
630
+ y,
631
+ test_size=self.validation_fraction,
632
+ stratify=stratify,
633
+ random_state=self._random_seed,
634
+ )
635
+ sample_weight_train = sample_weight_val = None
636
+ else:
637
+ # TODO: incorporate sample_weight in sampling here, as well as
638
+ # stratify
639
+ (
640
+ X_train,
641
+ X_val,
642
+ y_train,
643
+ y_val,
644
+ sample_weight_train,
645
+ sample_weight_val,
646
+ ) = train_test_split(
647
+ X,
648
+ y,
649
+ sample_weight,
650
+ test_size=self.validation_fraction,
651
+ stratify=stratify,
652
+ random_state=self._random_seed,
653
+ )
654
+ else:
655
+ X_train, y_train, sample_weight_train = X, y, sample_weight
656
+ X_val = y_val = sample_weight_val = None
657
+
658
+ # Bin the data
659
+ # For ease of use of the API, the user-facing GBDT classes accept the
660
+ # parameter max_bins, which doesn't take into account the bin for
661
+ # missing values (which is always allocated). However, since max_bins
662
+ # isn't the true maximal number of bins, all other private classes
663
+ # (binmapper, histbuilder...) accept n_bins instead, which is the
664
+ # actual total number of bins. Everywhere in the code, the
665
+ # convention is that n_bins == max_bins + 1
666
+ n_bins = self.max_bins + 1 # + 1 for missing values
667
+ self._bin_mapper = _BinMapper(
668
+ n_bins=n_bins,
669
+ is_categorical=self._is_categorical_remapped,
670
+ known_categories=known_categories,
671
+ random_state=self._random_seed,
672
+ n_threads=n_threads,
673
+ )
674
+ X_binned_train = self._bin_data(X_train, is_training_data=True)
675
+ if X_val is not None:
676
+ X_binned_val = self._bin_data(X_val, is_training_data=False)
677
+ else:
678
+ X_binned_val = None
679
+
680
+ # Uses binned data to check for missing values
681
+ has_missing_values = (
682
+ (X_binned_train == self._bin_mapper.missing_values_bin_idx_)
683
+ .any(axis=0)
684
+ .astype(np.uint8)
685
+ )
686
+
687
+ if self.verbose:
688
+ print("Fitting gradient boosted rounds:")
689
+
690
+ n_samples = X_binned_train.shape[0]
691
+ scoring_is_predefined_string = self.scoring in _SCORERS
692
+ need_raw_predictions_val = X_binned_val is not None and (
693
+ scoring_is_predefined_string or self.scoring == "loss"
694
+ )
695
+ # First time calling fit, or no warm start
696
+ if not (self._is_fitted() and self.warm_start):
697
+ # Clear random state and score attributes
698
+ self._clear_state()
699
+
700
+ # initialize raw_predictions: those are the accumulated values
701
+ # predicted by the trees for the training data. raw_predictions has
702
+ # shape (n_samples, n_trees_per_iteration) where
703
+ # n_trees_per_iteration is n_classes in multiclass classification,
704
+ # else 1.
705
+ # self._baseline_prediction has shape (1, n_trees_per_iteration)
706
+ self._baseline_prediction = self._loss.fit_intercept_only(
707
+ y_true=y_train, sample_weight=sample_weight_train
708
+ ).reshape((1, -1))
709
+ raw_predictions = np.zeros(
710
+ shape=(n_samples, self.n_trees_per_iteration_),
711
+ dtype=self._baseline_prediction.dtype,
712
+ order="F",
713
+ )
714
+ raw_predictions += self._baseline_prediction
715
+
716
+ # predictors is a matrix (list of lists) of TreePredictor objects
717
+ # with shape (n_iter_, n_trees_per_iteration)
718
+ self._predictors = predictors = []
719
+
720
+ # Initialize structures and attributes related to early stopping
721
+ self._scorer = None # set if scoring != loss
722
+ raw_predictions_val = None # set if use val and scoring is a string
723
+ self.train_score_ = []
724
+ self.validation_score_ = []
725
+
726
+ if self.do_early_stopping_:
727
+ # populate train_score and validation_score with the
728
+ # predictions of the initial model (before the first tree)
729
+
730
+ # Create raw_predictions_val for storing the raw predictions of
731
+ # the validation data.
732
+ if need_raw_predictions_val:
733
+ raw_predictions_val = np.zeros(
734
+ shape=(X_binned_val.shape[0], self.n_trees_per_iteration_),
735
+ dtype=self._baseline_prediction.dtype,
736
+ order="F",
737
+ )
738
+
739
+ raw_predictions_val += self._baseline_prediction
740
+
741
+ if self.scoring == "loss":
742
+ # we're going to compute scoring w.r.t the loss. As losses
743
+ # take raw predictions as input (unlike the scorers), we
744
+ # can optimize a bit and avoid repeatedly computing the
745
+ # predictions of the previous trees. We'll reuse
746
+ # raw_predictions (as it's needed for training anyway) for
747
+ # evaluating the training loss.
748
+
749
+ self._check_early_stopping_loss(
750
+ raw_predictions=raw_predictions,
751
+ y_train=y_train,
752
+ sample_weight_train=sample_weight_train,
753
+ raw_predictions_val=raw_predictions_val,
754
+ y_val=y_val,
755
+ sample_weight_val=sample_weight_val,
756
+ n_threads=n_threads,
757
+ )
758
+ else:
759
+ self._scorer = check_scoring(self, self.scoring)
760
+ # _scorer is a callable with signature (est, X, y) and
761
+ # calls est.predict() or est.predict_proba() depending on
762
+ # its nature.
763
+ # Unfortunately, each call to _scorer() will compute
764
+ # the predictions of all the trees. So we use a subset of
765
+ # the training set to compute train scores.
766
+
767
+ # Compute the subsample set
768
+ (
769
+ X_binned_small_train,
770
+ y_small_train,
771
+ sample_weight_small_train,
772
+ indices_small_train,
773
+ ) = self._get_small_trainset(
774
+ X_binned_train,
775
+ y_train,
776
+ sample_weight_train,
777
+ self._random_seed,
778
+ )
779
+
780
+ # If the scorer is a predefined string, then we optimize
781
+ # the evaluation by re-using the incrementally updated raw
782
+ # predictions.
783
+ if scoring_is_predefined_string:
784
+ raw_predictions_small_train = raw_predictions[
785
+ indices_small_train
786
+ ]
787
+ else:
788
+ raw_predictions_small_train = None
789
+
790
+ self._check_early_stopping_scorer(
791
+ X_binned_small_train,
792
+ y_small_train,
793
+ sample_weight_small_train,
794
+ X_binned_val,
795
+ y_val,
796
+ sample_weight_val,
797
+ raw_predictions_small_train=raw_predictions_small_train,
798
+ raw_predictions_val=raw_predictions_val,
799
+ )
800
+ begin_at_stage = 0
801
+
802
+ # warm start: this is not the first time fit was called
803
+ else:
804
+ # Check that the maximum number of iterations is not smaller
805
+ # than the number of iterations from the previous fit
806
+ if self.max_iter < self.n_iter_:
807
+ raise ValueError(
808
+ "max_iter=%d must be larger than or equal to "
809
+ "n_iter_=%d when warm_start==True" % (self.max_iter, self.n_iter_)
810
+ )
811
+
812
+ # Convert array attributes to lists
813
+ self.train_score_ = self.train_score_.tolist()
814
+ self.validation_score_ = self.validation_score_.tolist()
815
+
816
+ # Compute raw predictions
817
+ raw_predictions = self._raw_predict(X_binned_train, n_threads=n_threads)
818
+ if self.do_early_stopping_ and need_raw_predictions_val:
819
+ raw_predictions_val = self._raw_predict(
820
+ X_binned_val, n_threads=n_threads
821
+ )
822
+ else:
823
+ raw_predictions_val = None
824
+
825
+ if self.do_early_stopping_ and self.scoring != "loss":
826
+ # Compute the subsample set
827
+ (
828
+ X_binned_small_train,
829
+ y_small_train,
830
+ sample_weight_small_train,
831
+ indices_small_train,
832
+ ) = self._get_small_trainset(
833
+ X_binned_train, y_train, sample_weight_train, self._random_seed
834
+ )
835
+
836
+ # Get the predictors from the previous fit
837
+ predictors = self._predictors
838
+
839
+ begin_at_stage = self.n_iter_
840
+
841
+ # initialize gradients and hessians (empty arrays).
842
+ # shape = (n_samples, n_trees_per_iteration).
843
+ gradient, hessian = self._loss.init_gradient_and_hessian(
844
+ n_samples=n_samples, dtype=G_H_DTYPE, order="F"
845
+ )
846
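+ # Editor's note (illustrative, not upstream code): `constant_hessian` is
+ # True for losses whose hessian does not depend on the raw predictions
+ # (e.g. the half squared error loss, where it is identically 1). In that
+ # case only `gradient` needs to be refreshed in the branch inside the loop
+ # below, which is why `hessian` can stay untouched across iterations.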
+
847
+ for iteration in range(begin_at_stage, self.max_iter):
848
+ if self.verbose:
849
+ iteration_start_time = time()
850
+ print(
851
+ "[{}/{}] ".format(iteration + 1, self.max_iter), end="", flush=True
852
+ )
853
+
854
+ # Update gradients and hessians, inplace
855
+ # Note that self._loss expects shape (n_samples,) for
856
+ # n_trees_per_iteration = 1 else shape (n_samples, n_trees_per_iteration).
857
+ if self._loss.constant_hessian:
858
+ self._loss.gradient(
859
+ y_true=y_train,
860
+ raw_prediction=raw_predictions,
861
+ sample_weight=sample_weight_train,
862
+ gradient_out=gradient,
863
+ n_threads=n_threads,
864
+ )
865
+ else:
866
+ self._loss.gradient_hessian(
867
+ y_true=y_train,
868
+ raw_prediction=raw_predictions,
869
+ sample_weight=sample_weight_train,
870
+ gradient_out=gradient,
871
+ hessian_out=hessian,
872
+ n_threads=n_threads,
873
+ )
874
+
875
+ # Append a list since there may be more than 1 predictor per iter
876
+ predictors.append([])
877
+
878
+ # 2-d views of shape (n_samples, n_trees_per_iteration_) or (n_samples, 1)
879
+ # on gradient and hessian to simplify the loop over n_trees_per_iteration_.
880
+ if gradient.ndim == 1:
881
+ g_view = gradient.reshape((-1, 1))
882
+ h_view = hessian.reshape((-1, 1))
883
+ else:
884
+ g_view = gradient
885
+ h_view = hessian
886
+
887
+ # Build `n_trees_per_iteration` trees.
888
+ for k in range(self.n_trees_per_iteration_):
889
+ grower = TreeGrower(
890
+ X_binned=X_binned_train,
891
+ gradients=g_view[:, k],
892
+ hessians=h_view[:, k],
893
+ n_bins=n_bins,
894
+ n_bins_non_missing=self._bin_mapper.n_bins_non_missing_,
895
+ has_missing_values=has_missing_values,
896
+ is_categorical=self._is_categorical_remapped,
897
+ monotonic_cst=monotonic_cst,
898
+ interaction_cst=interaction_cst,
899
+ max_leaf_nodes=self.max_leaf_nodes,
900
+ max_depth=self.max_depth,
901
+ min_samples_leaf=self.min_samples_leaf,
902
+ l2_regularization=self.l2_regularization,
903
+ feature_fraction_per_split=self.max_features,
904
+ rng=self._feature_subsample_rng,
905
+ shrinkage=self.learning_rate,
906
+ n_threads=n_threads,
907
+ )
908
+ grower.grow()
909
+
910
+ acc_apply_split_time += grower.total_apply_split_time
911
+ acc_find_split_time += grower.total_find_split_time
912
+ acc_compute_hist_time += grower.total_compute_hist_time
913
+
914
+ if not self._loss.differentiable:
915
+ _update_leaves_values(
916
+ loss=self._loss,
917
+ grower=grower,
918
+ y_true=y_train,
919
+ raw_prediction=raw_predictions[:, k],
920
+ sample_weight=sample_weight_train,
921
+ )
922
+
923
+ predictor = grower.make_predictor(
924
+ binning_thresholds=self._bin_mapper.bin_thresholds_
925
+ )
926
+ predictors[-1].append(predictor)
927
+
928
+ # Update raw_predictions with the predictions of the newly
929
+ # created tree.
930
+ tic_pred = time()
931
+ _update_raw_predictions(raw_predictions[:, k], grower, n_threads)
932
+ toc_pred = time()
933
+ acc_prediction_time += toc_pred - tic_pred
934
+
935
+ should_early_stop = False
936
+ if self.do_early_stopping_:
937
+ # Update raw_predictions_val with the newest tree(s)
938
+ if need_raw_predictions_val:
939
+ for k, pred in enumerate(self._predictors[-1]):
940
+ raw_predictions_val[:, k] += pred.predict_binned(
941
+ X_binned_val,
942
+ self._bin_mapper.missing_values_bin_idx_,
943
+ n_threads,
944
+ )
945
+
946
+ if self.scoring == "loss":
947
+ should_early_stop = self._check_early_stopping_loss(
948
+ raw_predictions=raw_predictions,
949
+ y_train=y_train,
950
+ sample_weight_train=sample_weight_train,
951
+ raw_predictions_val=raw_predictions_val,
952
+ y_val=y_val,
953
+ sample_weight_val=sample_weight_val,
954
+ n_threads=n_threads,
955
+ )
956
+
957
+ else:
958
+ # If the scorer is a predefined string, then we optimize the
959
+ # evaluation by re-using the incrementally computed raw predictions.
960
+ if scoring_is_predefined_string:
961
+ raw_predictions_small_train = raw_predictions[
962
+ indices_small_train
963
+ ]
964
+ else:
965
+ raw_predictions_small_train = None
966
+
967
+ should_early_stop = self._check_early_stopping_scorer(
968
+ X_binned_small_train,
969
+ y_small_train,
970
+ sample_weight_small_train,
971
+ X_binned_val,
972
+ y_val,
973
+ sample_weight_val,
974
+ raw_predictions_small_train=raw_predictions_small_train,
975
+ raw_predictions_val=raw_predictions_val,
976
+ )
977
+
978
+ if self.verbose:
979
+ self._print_iteration_stats(iteration_start_time)
980
+
981
+ # maybe we could also early stop if all the trees are stumps?
982
+ if should_early_stop:
983
+ break
984
+
985
+ if self.verbose:
986
+ duration = time() - fit_start_time
987
+ n_total_leaves = sum(
988
+ predictor.get_n_leaf_nodes()
989
+ for predictors_at_ith_iteration in self._predictors
990
+ for predictor in predictors_at_ith_iteration
991
+ )
992
+ n_predictors = sum(
993
+ len(predictors_at_ith_iteration)
994
+ for predictors_at_ith_iteration in self._predictors
995
+ )
996
+ print(
997
+ "Fit {} trees in {:.3f} s, ({} total leaves)".format(
998
+ n_predictors, duration, n_total_leaves
999
+ )
1000
+ )
1001
+ print(
1002
+ "{:<32} {:.3f}s".format(
1003
+ "Time spent computing histograms:", acc_compute_hist_time
1004
+ )
1005
+ )
1006
+ print(
1007
+ "{:<32} {:.3f}s".format(
1008
+ "Time spent finding best splits:", acc_find_split_time
1009
+ )
1010
+ )
1011
+ print(
1012
+ "{:<32} {:.3f}s".format(
1013
+ "Time spent applying splits:", acc_apply_split_time
1014
+ )
1015
+ )
1016
+ print(
1017
+ "{:<32} {:.3f}s".format("Time spent predicting:", acc_prediction_time)
1018
+ )
1019
+
1020
+ self.train_score_ = np.asarray(self.train_score_)
1021
+ self.validation_score_ = np.asarray(self.validation_score_)
1022
+ del self._in_fit # hard delete so we're sure it can't be used anymore
1023
+ return self
1024
+
1025
+ def _is_fitted(self):
1026
+ return len(getattr(self, "_predictors", [])) > 0
1027
+
1028
+ def _clear_state(self):
1029
+ """Clear the state of the gradient boosting model."""
1030
+ for var in ("train_score_", "validation_score_"):
1031
+ if hasattr(self, var):
1032
+ delattr(self, var)
1033
+
1034
+ def _get_small_trainset(self, X_binned_train, y_train, sample_weight_train, seed):
1035
+ """Compute the indices of the subsample set and return this set.
1036
+
1037
+ For efficiency, we need to subsample the training set to compute scores
1038
+ with scorers.
1039
+ """
1040
+ # TODO: incorporate sample_weights here in `resample`
1041
+ subsample_size = 10000
1042
+ if X_binned_train.shape[0] > subsample_size:
1043
+ indices = np.arange(X_binned_train.shape[0])
1044
+ stratify = y_train if is_classifier(self) else None
1045
+ indices = resample(
1046
+ indices,
1047
+ n_samples=subsample_size,
1048
+ replace=False,
1049
+ random_state=seed,
1050
+ stratify=stratify,
1051
+ )
1052
+ X_binned_small_train = X_binned_train[indices]
1053
+ y_small_train = y_train[indices]
1054
+ if sample_weight_train is not None:
1055
+ sample_weight_small_train = sample_weight_train[indices]
1056
+ else:
1057
+ sample_weight_small_train = None
1058
+ X_binned_small_train = np.ascontiguousarray(X_binned_small_train)
1059
+ return (
1060
+ X_binned_small_train,
1061
+ y_small_train,
1062
+ sample_weight_small_train,
1063
+ indices,
1064
+ )
1065
+ else:
1066
+ return X_binned_train, y_train, sample_weight_train, slice(None)
1067
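+ # Editor's note (not upstream code): returning slice(None) instead of an
+ # index array lets callers write raw_predictions[indices_small_train]
+ # uniformly; when no subsampling happens this simply selects every row.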
+
1068
+ def _check_early_stopping_scorer(
1069
+ self,
1070
+ X_binned_small_train,
1071
+ y_small_train,
1072
+ sample_weight_small_train,
1073
+ X_binned_val,
1074
+ y_val,
1075
+ sample_weight_val,
1076
+ raw_predictions_small_train=None,
1077
+ raw_predictions_val=None,
1078
+ ):
1079
+ """Check if fitting should be early-stopped based on scorer.
1080
+
1081
+ Scores are computed on validation data or on training data.
1082
+ """
1083
+ if is_classifier(self):
1084
+ y_small_train = self.classes_[y_small_train.astype(int)]
1085
+
1086
+ self.train_score_.append(
1087
+ self._score_with_raw_predictions(
1088
+ X_binned_small_train,
1089
+ y_small_train,
1090
+ sample_weight_small_train,
1091
+ raw_predictions_small_train,
1092
+ )
1093
+ )
1094
+
1095
+ if self._use_validation_data:
1096
+ if is_classifier(self):
1097
+ y_val = self.classes_[y_val.astype(int)]
1098
+ self.validation_score_.append(
1099
+ self._score_with_raw_predictions(
1100
+ X_binned_val, y_val, sample_weight_val, raw_predictions_val
1101
+ )
1102
+ )
1103
+ return self._should_stop(self.validation_score_)
1104
+ else:
1105
+ return self._should_stop(self.train_score_)
1106
+
1107
+ def _score_with_raw_predictions(self, X, y, sample_weight, raw_predictions=None):
1108
+ if raw_predictions is None:
1109
+ patcher_raw_predict = nullcontext()
1110
+ else:
1111
+ patcher_raw_predict = _patch_raw_predict(self, raw_predictions)
1112
+
1113
+ with patcher_raw_predict:
1114
+ if sample_weight is None:
1115
+ return self._scorer(self, X, y)
1116
+ else:
1117
+ return self._scorer(self, X, y, sample_weight=sample_weight)
1118
+
1119
+ def _check_early_stopping_loss(
1120
+ self,
1121
+ raw_predictions,
1122
+ y_train,
1123
+ sample_weight_train,
1124
+ raw_predictions_val,
1125
+ y_val,
1126
+ sample_weight_val,
1127
+ n_threads=1,
1128
+ ):
1129
+ """Check if fitting should be early-stopped based on loss.
1130
+
1131
+ Scores are computed on validation data or on training data.
1132
+ """
1133
+ self.train_score_.append(
1134
+ -self._loss(
1135
+ y_true=y_train,
1136
+ raw_prediction=raw_predictions,
1137
+ sample_weight=sample_weight_train,
1138
+ n_threads=n_threads,
1139
+ )
1140
+ )
1141
+
1142
+ if self._use_validation_data:
1143
+ self.validation_score_.append(
1144
+ -self._loss(
1145
+ y_true=y_val,
1146
+ raw_prediction=raw_predictions_val,
1147
+ sample_weight=sample_weight_val,
1148
+ n_threads=n_threads,
1149
+ )
1150
+ )
1151
+ return self._should_stop(self.validation_score_)
1152
+ else:
1153
+ return self._should_stop(self.train_score_)
1154
+
1155
+ def _should_stop(self, scores):
1156
+ """
1157
+ Return True (do early stopping) if the last n scores aren't better
1158
+ than the (n-1)th-to-last score, up to some tolerance.
1159
+ """
1160
+ reference_position = self.n_iter_no_change + 1
1161
+ if len(scores) < reference_position:
1162
+ return False
1163
+
1164
+ # A higher score is always better. Higher tol means that it will be
1165
+ # harder for subsequent iteration to be considered an improvement upon
1166
+ # the reference score, and therefore it is more likely to early stop
1167
+ # because of the lack of significant improvement.
1168
+ reference_score = scores[-reference_position] + self.tol
1169
+ recent_scores = scores[-reference_position + 1 :]
1170
+ recent_improvements = [score > reference_score for score in recent_scores]
1171
+ return not any(recent_improvements)
1172
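+ # Editor's note (illustrative worked example, not upstream code): with
+ # n_iter_no_change=2 and tol=0.0, reference_position == 3, so for
+ #
+ #     scores = [0.70, 0.80, 0.81, 0.809, 0.808]
+ #
+ # the reference score is scores[-3] + tol == 0.81 and the recent scores
+ # are [0.809, 0.808]; neither exceeds the reference, so _should_stop
+ # returns True. A single recent score above 0.81 would keep training going.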
+
1173
+ def _bin_data(self, X, is_training_data):
1174
+ """Bin data X.
1175
+
1176
+ If is_training_data, then fit the _bin_mapper attribute.
1177
+ Else, the binned data is converted to a C-contiguous array.
1178
+ """
1179
+
1180
+ description = "training" if is_training_data else "validation"
1181
+ if self.verbose:
1182
+ print(
1183
+ "Binning {:.3f} GB of {} data: ".format(X.nbytes / 1e9, description),
1184
+ end="",
1185
+ flush=True,
1186
+ )
1187
+ tic = time()
1188
+ if is_training_data:
1189
+ X_binned = self._bin_mapper.fit_transform(X) # F-aligned array
1190
+ else:
1191
+ X_binned = self._bin_mapper.transform(X) # F-aligned array
1192
+ # We convert the array to C-contiguous since predicting is faster
1193
+ # with this layout (training is faster on F-arrays though)
1194
+ X_binned = np.ascontiguousarray(X_binned)
1195
+ toc = time()
1196
+ if self.verbose:
1197
+ duration = toc - tic
1198
+ print("{:.3f} s".format(duration))
1199
+
1200
+ return X_binned
1201
+
1202
+ def _print_iteration_stats(self, iteration_start_time):
1203
+ """Print info about the current fitting iteration."""
1204
+ log_msg = ""
1205
+
1206
+ predictors_of_ith_iteration = [
1207
+ predictors_list
1208
+ for predictors_list in self._predictors[-1]
1209
+ if predictors_list
1210
+ ]
1211
+ n_trees = len(predictors_of_ith_iteration)
1212
+ max_depth = max(
1213
+ predictor.get_max_depth() for predictor in predictors_of_ith_iteration
1214
+ )
1215
+ n_leaves = sum(
1216
+ predictor.get_n_leaf_nodes() for predictor in predictors_of_ith_iteration
1217
+ )
1218
+
1219
+ if n_trees == 1:
1220
+ log_msg += "{} tree, {} leaves, ".format(n_trees, n_leaves)
1221
+ else:
1222
+ log_msg += "{} trees, {} leaves ".format(n_trees, n_leaves)
1223
+ log_msg += "({} on avg), ".format(int(n_leaves / n_trees))
1224
+
1225
+ log_msg += "max depth = {}, ".format(max_depth)
1226
+
1227
+ if self.do_early_stopping_:
1228
+ if self.scoring == "loss":
1229
+ factor = -1 # score_ arrays contain the negative loss
1230
+ name = "loss"
1231
+ else:
1232
+ factor = 1
1233
+ name = "score"
1234
+ log_msg += "train {}: {:.5f}, ".format(name, factor * self.train_score_[-1])
1235
+ if self._use_validation_data:
1236
+ log_msg += "val {}: {:.5f}, ".format(
1237
+ name, factor * self.validation_score_[-1]
1238
+ )
1239
+
1240
+ iteration_time = time() - iteration_start_time
1241
+ log_msg += "in {:0.3f}s".format(iteration_time)
1242
+
1243
+ print(log_msg)
1244
+
1245
+ def _raw_predict(self, X, n_threads=None):
1246
+ """Return the sum of the leaves values over all predictors.
1247
+
1248
+ Parameters
1249
+ ----------
1250
+ X : array-like of shape (n_samples, n_features)
1251
+ The input samples.
1252
+ n_threads : int, default=None
1253
+ Number of OpenMP threads to use. `_openmp_effective_n_threads` is called
1254
+ to determine the effective number of threads to use, which takes cgroups CPU
1255
+ quotas into account. See the docstring of `_openmp_effective_n_threads`
1256
+ for details.
1257
+
1258
+ Returns
1259
+ -------
1260
+ raw_predictions : array, shape (n_samples, n_trees_per_iteration)
1261
+ The raw predicted values.
1262
+ """
1263
+ check_is_fitted(self)
1264
+ is_binned = getattr(self, "_in_fit", False)
1265
+ if not is_binned:
1266
+ X = self._preprocess_X(X, reset=False)
1267
+
1268
+ n_samples = X.shape[0]
1269
+ raw_predictions = np.zeros(
1270
+ shape=(n_samples, self.n_trees_per_iteration_),
1271
+ dtype=self._baseline_prediction.dtype,
1272
+ order="F",
1273
+ )
1274
+ raw_predictions += self._baseline_prediction
1275
+
1276
+ # We intentionally decouple the number of threads used at prediction
1277
+ # time from the number of threads used at fit time because the model
1278
+ # can be deployed on a different machine for prediction purposes.
1279
+ n_threads = _openmp_effective_n_threads(n_threads)
1280
+ self._predict_iterations(
1281
+ X, self._predictors, raw_predictions, is_binned, n_threads
1282
+ )
1283
+ return raw_predictions
1284
+
1285
+ def _predict_iterations(self, X, predictors, raw_predictions, is_binned, n_threads):
1286
+ """Add the predictions of the predictors to raw_predictions."""
1287
+ if not is_binned:
1288
+ (
1289
+ known_cat_bitsets,
1290
+ f_idx_map,
1291
+ ) = self._bin_mapper.make_known_categories_bitsets()
1292
+
1293
+ for predictors_of_ith_iteration in predictors:
1294
+ for k, predictor in enumerate(predictors_of_ith_iteration):
1295
+ if is_binned:
1296
+ predict = partial(
1297
+ predictor.predict_binned,
1298
+ missing_values_bin_idx=self._bin_mapper.missing_values_bin_idx_,
1299
+ n_threads=n_threads,
1300
+ )
1301
+ else:
1302
+ predict = partial(
1303
+ predictor.predict,
1304
+ known_cat_bitsets=known_cat_bitsets,
1305
+ f_idx_map=f_idx_map,
1306
+ n_threads=n_threads,
1307
+ )
1308
+ raw_predictions[:, k] += predict(X)
1309
+
1310
+ def _staged_raw_predict(self, X):
1311
+ """Compute raw predictions of ``X`` for each iteration.
1312
+
1313
+ This method allows monitoring (i.e. determine error on testing set)
1314
+ after each stage.
1315
+
1316
+ Parameters
1317
+ ----------
1318
+ X : array-like of shape (n_samples, n_features)
1319
+ The input samples.
1320
+
1321
+ Yields
1322
+ ------
1323
+ raw_predictions : generator of ndarray of shape \
1324
+ (n_samples, n_trees_per_iteration)
1325
+ The raw predictions of the input samples. The order of the
1326
+ classes corresponds to that in the attribute :term:`classes_`.
1327
+ """
1328
+ check_is_fitted(self)
1329
+ X = self._preprocess_X(X, reset=False)
1330
+ if X.shape[1] != self._n_features:
1331
+ raise ValueError(
1332
+ "X has {} features but this estimator was trained with "
1333
+ "{} features.".format(X.shape[1], self._n_features)
1334
+ )
1335
+ n_samples = X.shape[0]
1336
+ raw_predictions = np.zeros(
1337
+ shape=(n_samples, self.n_trees_per_iteration_),
1338
+ dtype=self._baseline_prediction.dtype,
1339
+ order="F",
1340
+ )
1341
+ raw_predictions += self._baseline_prediction
1342
+
1343
+ # We intentionally decouple the number of threads used at prediction
1344
+ # time from the number of threads used at fit time because the model
1345
+ # can be deployed on a different machine for prediction purposes.
1346
+ n_threads = _openmp_effective_n_threads()
1347
+ for iteration in range(len(self._predictors)):
1348
+ self._predict_iterations(
1349
+ X,
1350
+ self._predictors[iteration : iteration + 1],
1351
+ raw_predictions,
1352
+ is_binned=False,
1353
+ n_threads=n_threads,
1354
+ )
1355
+ yield raw_predictions.copy()
1356
+
1357
+ def _compute_partial_dependence_recursion(self, grid, target_features):
1358
+ """Fast partial dependence computation.
1359
+
1360
+ Parameters
1361
+ ----------
1362
+ grid : ndarray, shape (n_samples, n_target_features)
1363
+ The grid points on which the partial dependence should be
1364
+ evaluated.
1365
+ target_features : ndarray, shape (n_target_features)
1366
+ The set of target features for which the partial dependence
1367
+ should be evaluated.
1368
+
1369
+ Returns
1370
+ -------
1371
+ averaged_predictions : ndarray, shape \
1372
+ (n_trees_per_iteration, n_samples)
1373
+ The value of the partial dependence function on each grid point.
1374
+ """
1375
+
1376
+ if getattr(self, "_fitted_with_sw", False):
1377
+ raise NotImplementedError(
1378
+ "{} does not support partial dependence "
1379
+ "plots with the 'recursion' method when "
1380
+ "sample weights were given during fit "
1381
+ "time.".format(self.__class__.__name__)
1382
+ )
1383
+
1384
+ grid = np.asarray(grid, dtype=X_DTYPE, order="C")
1385
+ averaged_predictions = np.zeros(
1386
+ (self.n_trees_per_iteration_, grid.shape[0]), dtype=Y_DTYPE
1387
+ )
1388
+
1389
+ for predictors_of_ith_iteration in self._predictors:
1390
+ for k, predictor in enumerate(predictors_of_ith_iteration):
1391
+ predictor.compute_partial_dependence(
1392
+ grid, target_features, averaged_predictions[k]
1393
+ )
1394
+ # Note that the learning rate is already accounted for in the leaves
1395
+ # values.
1396
+
1397
+ return averaged_predictions
1398
+
1399
+ def _more_tags(self):
1400
+ return {"allow_nan": True}
1401
+
1402
+ @abstractmethod
1403
+ def _get_loss(self, sample_weight):
1404
+ pass
1405
+
1406
+ @abstractmethod
1407
+ def _encode_y(self, y=None):
1408
+ pass
1409
+
1410
+ @property
1411
+ def n_iter_(self):
1412
+ """Number of iterations of the boosting process."""
1413
+ check_is_fitted(self)
1414
+ return len(self._predictors)
1415
+
1416
+
1417
+ class HistGradientBoostingRegressor(RegressorMixin, BaseHistGradientBoosting):
1418
+ """Histogram-based Gradient Boosting Regression Tree.
1419
+
1420
+ This estimator is much faster than
1421
+ :class:`GradientBoostingRegressor<sklearn.ensemble.GradientBoostingRegressor>`
1422
+ for big datasets (n_samples >= 10 000).
1423
+
1424
+ This estimator has native support for missing values (NaNs). During
1425
+ training, the tree grower learns at each split point whether samples
1426
+ with missing values should go to the left or right child, based on the
1427
+ potential gain. When predicting, samples with missing values are
1428
+ assigned to the left or right child accordingly. If no missing values
1429
+ were encountered for a given feature during training, then samples with
1430
+ missing values are mapped to whichever child has the most samples.
1431
+
1432
+ This implementation is inspired by
1433
+ `LightGBM <https://github.com/Microsoft/LightGBM>`_.
1434
+
1435
+ Read more in the :ref:`User Guide <histogram_based_gradient_boosting>`.
1436
+
1437
+ .. versionadded:: 0.21
1438
+
1439
+ Parameters
1440
+ ----------
1441
+ loss : {'squared_error', 'absolute_error', 'gamma', 'poisson', 'quantile'}, \
1442
+ default='squared_error'
1443
+ The loss function to use in the boosting process. Note that the
1444
+ "squared error", "gamma" and "poisson" losses actually implement
1445
+ "half least squares loss", "half gamma deviance" and "half poisson
1446
+ deviance" to simplify the computation of the gradient. Furthermore,
1447
+ "gamma" and "poisson" losses internally use a log-link, "gamma"
1448
+ requires ``y > 0`` and "poisson" requires ``y >= 0``.
1449
+ "quantile" uses the pinball loss.
1450
+
1451
+ .. versionchanged:: 0.23
1452
+ Added option 'poisson'.
1453
+
1454
+ .. versionchanged:: 1.1
1455
+ Added option 'quantile'.
1456
+
1457
+ .. versionchanged:: 1.3
1458
+ Added option 'gamma'.
1459
+
1460
+ quantile : float, default=None
1461
+ If loss is "quantile", this parameter specifies which quantile to be estimated
1462
+ and must be between 0 and 1.
1463
+ learning_rate : float, default=0.1
1464
+ The learning rate, also known as *shrinkage*. This is used as a
1465
+ multiplicative factor for the leaves values. Use ``1`` for no
1466
+ shrinkage.
1467
+ max_iter : int, default=100
1468
+ The maximum number of iterations of the boosting process, i.e. the
1469
+ maximum number of trees.
1470
+ max_leaf_nodes : int or None, default=31
1471
+ The maximum number of leaves for each tree. Must be strictly greater
1472
+ than 1. If None, there is no maximum limit.
1473
+ max_depth : int or None, default=None
1474
+ The maximum depth of each tree. The depth of a tree is the number of
1475
+ edges to go from the root to the deepest leaf.
1476
+ Depth isn't constrained by default.
1477
+ min_samples_leaf : int, default=20
1478
+ The minimum number of samples per leaf. For small datasets with less
1479
+ than a few hundred samples, it is recommended to lower this value
1480
+ since only very shallow trees would be built.
1481
+ l2_regularization : float, default=0
1482
+ The L2 regularization parameter. Use ``0`` for no regularization (default).
1483
+ max_features : float, default=1.0
1484
+ Proportion of randomly chosen features in each and every node split.
1485
+ This is a form of regularization; smaller values make the trees weaker
1486
+ learners and might prevent overfitting.
1487
+ If interaction constraints from `interaction_cst` are present, only allowed
1488
+ features are taken into account for the subsampling.
1489
+
1490
+ .. versionadded:: 1.4
1491
+
1492
+ max_bins : int, default=255
1493
+ The maximum number of bins to use for non-missing values. Before
1494
+ training, each feature of the input array `X` is binned into
1495
+ integer-valued bins, which allows for a much faster training stage.
1496
+ Features with a small number of unique values may use less than
1497
+ ``max_bins`` bins. In addition to the ``max_bins`` bins, one more bin
1498
+ is always reserved for missing values. Must be no larger than 255.
1499
+ categorical_features : array-like of {bool, int, str} of shape (n_features) \
1500
+ or shape (n_categorical_features,), default=None
1501
+ Indicates the categorical features.
1502
+
1503
+ - None : no feature will be considered categorical.
1504
+ - boolean array-like : boolean mask indicating categorical features.
1505
+ - integer array-like : integer indices indicating categorical
1506
+ features.
1507
+ - str array-like: names of categorical features (assuming the training
1508
+ data has feature names).
1509
+ - `"from_dtype"`: dataframe columns with dtype "category" are
1510
+ considered to be categorical features. The input must be an object
1511
+ exposing a ``__dataframe__`` method such as pandas or polars
1512
+ DataFrames to use this feature.
1513
+
1514
+ For each categorical feature, there must be at most `max_bins` unique
1515
+ categories. Negative values for categorical features encoded as numeric
1516
+ dtypes are treated as missing values. All categorical values are
1517
+ converted to floating point numbers. This means that categorical values
1518
+ of 1.0 and 1 are treated as the same category.
1519
+
1520
+ Read more in the :ref:`User Guide <categorical_support_gbdt>`.
1521
+
1522
+ .. versionadded:: 0.24
1523
+
1524
+ .. versionchanged:: 1.2
1525
+ Added support for feature names.
1526
+
1527
+ .. versionchanged:: 1.4
1528
+ Added `"from_dtype"` option. The default will change to `"from_dtype"` in
1529
+ v1.6.
1530
+
1531
+ monotonic_cst : array-like of int of shape (n_features) or dict, default=None
1532
+ Monotonic constraints to enforce on each feature are specified using the
1533
+ following integer values:
1534
+
1535
+ - 1: monotonic increase
1536
+ - 0: no constraint
1537
+ - -1: monotonic decrease
1538
+
1539
+ If a dict with str keys, map feature to monotonic constraints by name.
1540
+ If an array, the features are mapped to constraints by position. See
1541
+ :ref:`monotonic_cst_features_names` for a usage example.
1542
+
1543
+ Read more in the :ref:`User Guide <monotonic_cst_gbdt>`.
1544
+
1545
+ .. versionadded:: 0.23
1546
+
1547
+ .. versionchanged:: 1.2
1548
+ Accept dict of constraints with feature names as keys.
1549
+
1550
+ interaction_cst : {"pairwise", "no_interactions"} or sequence of lists/tuples/sets \
1551
+ of int, default=None
1552
+ Specify interaction constraints, the sets of features which can
1553
+ interact with each other in child node splits.
1554
+
1555
+ Each item specifies the set of feature indices that are allowed
1556
+ to interact with each other. If there are more features than
1557
+ specified in these constraints, they are treated as if they were
1558
+ specified as an additional set.
1559
+
1560
+ The strings "pairwise" and "no_interactions" are shorthands for
1561
+ allowing only pairwise or no interactions, respectively.
1562
+
1563
+ For instance, with 5 features in total, `interaction_cst=[{0, 1}]`
1564
+ is equivalent to `interaction_cst=[{0, 1}, {2, 3, 4}]`,
1565
+ and specifies that each branch of a tree will either only split
1566
+ on features 0 and 1 or only split on features 2, 3 and 4.
1567
+
1568
+ .. versionadded:: 1.2
1569
+
1570
+ warm_start : bool, default=False
1571
+ When set to ``True``, reuse the solution of the previous call to fit
1572
+ and add more estimators to the ensemble. For results to be valid, the
1573
+ estimator should be re-trained on the same data only.
1574
+ See :term:`the Glossary <warm_start>`.
1575
+ early_stopping : 'auto' or bool, default='auto'
1576
+ If 'auto', early stopping is enabled if the sample size is larger than
1577
+ 10000. If True, early stopping is enabled, otherwise early stopping is
1578
+ disabled.
1579
+
1580
+ .. versionadded:: 0.23
1581
+
1582
+ scoring : str or callable or None, default='loss'
1583
+ Scoring parameter to use for early stopping. It can be a single
1584
+ string (see :ref:`scoring_parameter`) or a callable (see
1585
+ :ref:`scoring`). If None, the estimator's default scorer is used. If
1586
+ ``scoring='loss'``, early stopping is checked w.r.t the loss value.
1587
+ Only used if early stopping is performed.
1588
+ validation_fraction : int or float or None, default=0.1
1589
+ Proportion (or absolute size) of training data to set aside as
1590
+ validation data for early stopping. If None, early stopping is done on
1591
+ the training data. Only used if early stopping is performed.
1592
+ n_iter_no_change : int, default=10
1593
+ Used to determine when to "early stop". The fitting process is
1594
+ stopped when none of the last ``n_iter_no_change`` scores are better
1595
+ than the ``n_iter_no_change - 1`` -th-to-last one, up to some
1596
+ tolerance. Only used if early stopping is performed.
1597
+ tol : float, default=1e-7
1598
+ The absolute tolerance to use when comparing scores during early
1599
+ stopping. The higher the tolerance, the more likely we are to early
1600
+ stop: higher tolerance means that it will be harder for subsequent
1601
+ iterations to be considered an improvement upon the reference score.
1602
+ verbose : int, default=0
1603
+ The verbosity level. If not zero, print some information about the
1604
+ fitting process.
1605
+ random_state : int, RandomState instance or None, default=None
1606
+ Pseudo-random number generator to control the subsampling in the
1607
+ binning process, and the train/validation data split if early stopping
1608
+ is enabled.
1609
+ Pass an int for reproducible output across multiple function calls.
1610
+ See :term:`Glossary <random_state>`.
1611
+
1612
+ Attributes
1613
+ ----------
1614
+ do_early_stopping_ : bool
1615
+ Indicates whether early stopping is used during training.
1616
+ n_iter_ : int
1617
+ The number of iterations as selected by early stopping, depending on
1618
+ the `early_stopping` parameter. Otherwise it corresponds to max_iter.
1619
+ n_trees_per_iteration_ : int
1620
+ The number of trees that are built at each iteration. For regressors,
1621
+ this is always 1.
1622
+ train_score_ : ndarray, shape (n_iter_+1,)
1623
+ The scores at each iteration on the training data. The first entry
1624
+ is the score of the ensemble before the first iteration. Scores are
1625
+ computed according to the ``scoring`` parameter. If ``scoring`` is
1626
+ not 'loss', scores are computed on a subset of at most 10 000
1627
+ samples. Empty if no early stopping.
1628
+ validation_score_ : ndarray, shape (n_iter_+1,)
1629
+ The scores at each iteration on the held-out validation data. The
1630
+ first entry is the score of the ensemble before the first iteration.
1631
+ Scores are computed according to the ``scoring`` parameter. Empty if
1632
+ no early stopping or if ``validation_fraction`` is None.
1633
+ is_categorical_ : ndarray, shape (n_features, ) or None
1634
+ Boolean mask for the categorical features. ``None`` if there are no
1635
+ categorical features.
1636
+ n_features_in_ : int
1637
+ Number of features seen during :term:`fit`.
1638
+
1639
+ .. versionadded:: 0.24
1640
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
1641
+ Names of features seen during :term:`fit`. Defined only when `X`
1642
+ has feature names that are all strings.
1643
+
1644
+ .. versionadded:: 1.0
1645
+
1646
+ See Also
1647
+ --------
1648
+ GradientBoostingRegressor : Exact gradient boosting method that does not
1649
+ scale as well on datasets with a large number of samples.
1650
+ sklearn.tree.DecisionTreeRegressor : A decision tree regressor.
1651
+ RandomForestRegressor : A meta-estimator that fits a number of decision
1652
+ tree regressors on various sub-samples of the dataset and uses
1653
+ averaging to improve the statistical performance and control
1654
+ over-fitting.
1655
+ AdaBoostRegressor : A meta-estimator that begins by fitting a regressor
1656
+ on the original dataset and then fits additional copies of the
1657
+ regressor on the same dataset but where the weights of instances are
1658
+ adjusted according to the error of the current prediction. As such,
1659
+ subsequent regressors focus more on difficult cases.
1660
+
1661
+ Examples
1662
+ --------
1663
+ >>> from sklearn.ensemble import HistGradientBoostingRegressor
1664
+ >>> from sklearn.datasets import load_diabetes
1665
+ >>> X, y = load_diabetes(return_X_y=True)
1666
+ >>> est = HistGradientBoostingRegressor().fit(X, y)
1667
+ >>> est.score(X, y)
1668
+ 0.92...
1669
+ """
1670
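+ # Editor's note: a minimal usage sketch of the quantile loss (illustrative
+ # only, not part of the upstream file; assumes scikit-learn is installed):
+ #
+ #     from sklearn.datasets import make_regression
+ #     from sklearn.ensemble import HistGradientBoostingRegressor
+ #
+ #     X, y = make_regression(n_samples=500, noise=10.0, random_state=0)
+ #     est_90 = HistGradientBoostingRegressor(
+ #         loss="quantile", quantile=0.9, random_state=0
+ #     ).fit(X, y)
+ #     # roughly 90% of the training targets should fall below the predictions
+ #     print((y <= est_90.predict(X)).mean())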
+
1671
+ _parameter_constraints: dict = {
1672
+ **BaseHistGradientBoosting._parameter_constraints,
1673
+ "loss": [
1674
+ StrOptions(
1675
+ {
1676
+ "squared_error",
1677
+ "absolute_error",
1678
+ "poisson",
1679
+ "gamma",
1680
+ "quantile",
1681
+ }
1682
+ ),
1683
+ BaseLoss,
1684
+ ],
1685
+ "quantile": [Interval(Real, 0, 1, closed="both"), None],
1686
+ }
1687
+
1688
+ def __init__(
1689
+ self,
1690
+ loss="squared_error",
1691
+ *,
1692
+ quantile=None,
1693
+ learning_rate=0.1,
1694
+ max_iter=100,
1695
+ max_leaf_nodes=31,
1696
+ max_depth=None,
1697
+ min_samples_leaf=20,
1698
+ l2_regularization=0.0,
1699
+ max_features=1.0,
1700
+ max_bins=255,
1701
+ categorical_features="warn",
1702
+ monotonic_cst=None,
1703
+ interaction_cst=None,
1704
+ warm_start=False,
1705
+ early_stopping="auto",
1706
+ scoring="loss",
1707
+ validation_fraction=0.1,
1708
+ n_iter_no_change=10,
1709
+ tol=1e-7,
1710
+ verbose=0,
1711
+ random_state=None,
1712
+ ):
1713
+ super(HistGradientBoostingRegressor, self).__init__(
1714
+ loss=loss,
1715
+ learning_rate=learning_rate,
1716
+ max_iter=max_iter,
1717
+ max_leaf_nodes=max_leaf_nodes,
1718
+ max_depth=max_depth,
1719
+ min_samples_leaf=min_samples_leaf,
1720
+ l2_regularization=l2_regularization,
1721
+ max_features=max_features,
1722
+ max_bins=max_bins,
1723
+ monotonic_cst=monotonic_cst,
1724
+ interaction_cst=interaction_cst,
1725
+ categorical_features=categorical_features,
1726
+ early_stopping=early_stopping,
1727
+ warm_start=warm_start,
1728
+ scoring=scoring,
1729
+ validation_fraction=validation_fraction,
1730
+ n_iter_no_change=n_iter_no_change,
1731
+ tol=tol,
1732
+ verbose=verbose,
1733
+ random_state=random_state,
1734
+ )
1735
+ self.quantile = quantile
1736
+
1737
+ def predict(self, X):
1738
+ """Predict values for X.
1739
+
1740
+ Parameters
1741
+ ----------
1742
+ X : array-like, shape (n_samples, n_features)
1743
+ The input samples.
1744
+
1745
+ Returns
1746
+ -------
1747
+ y : ndarray, shape (n_samples,)
1748
+ The predicted values.
1749
+ """
1750
+ check_is_fitted(self)
1751
+ # Return inverse link of raw predictions after converting
1752
+ # shape (n_samples, 1) to (n_samples,)
1753
+ return self._loss.link.inverse(self._raw_predict(X).ravel())
1754
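+ # Editor's note (not upstream code): for 'squared_error', 'absolute_error'
+ # and 'quantile' the link is the identity, so predict(X) equals the raw
+ # predictions. For 'poisson' and 'gamma' the losses use a log-link (see the
+ # class docstring), so the inverse link applied above is the exponential,
+ # which guarantees strictly positive predictions.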
+
1755
+ def staged_predict(self, X):
1756
+ """Predict regression target for each iteration.
1757
+
1758
+ This method allows monitoring (i.e. determine error on testing set)
1759
+ after each stage.
1760
+
1761
+ .. versionadded:: 0.24
1762
+
1763
+ Parameters
1764
+ ----------
1765
+ X : array-like of shape (n_samples, n_features)
1766
+ The input samples.
1767
+
1768
+ Yields
1769
+ ------
1770
+ y : generator of ndarray of shape (n_samples,)
1771
+ The predicted values of the input samples, for each iteration.
1772
+ """
1773
+ for raw_predictions in self._staged_raw_predict(X):
1774
+ yield self._loss.link.inverse(raw_predictions.ravel())
1775
+
1776
+ def _encode_y(self, y):
1777
+ # Just convert y to the expected dtype
1778
+ self.n_trees_per_iteration_ = 1
1779
+ y = y.astype(Y_DTYPE, copy=False)
1780
+ if self.loss == "gamma":
1781
+ # Ensure y > 0
1782
+ if not np.all(y > 0):
1783
+ raise ValueError("loss='gamma' requires strictly positive y.")
1784
+ elif self.loss == "poisson":
1785
+ # Ensure y >= 0 and sum(y) > 0
1786
+ if not (np.all(y >= 0) and np.sum(y) > 0):
1787
+ raise ValueError(
1788
+ "loss='poisson' requires non-negative y and sum(y) > 0."
1789
+ )
1790
+ return y
1791
+
1792
+ def _get_loss(self, sample_weight):
1793
+ if self.loss == "quantile":
1794
+ return _LOSSES[self.loss](
1795
+ sample_weight=sample_weight, quantile=self.quantile
1796
+ )
1797
+ else:
1798
+ return _LOSSES[self.loss](sample_weight=sample_weight)
1799
+
1800
+
1801
+ class HistGradientBoostingClassifier(ClassifierMixin, BaseHistGradientBoosting):
1802
+ """Histogram-based Gradient Boosting Classification Tree.
1803
+
1804
+ This estimator is much faster than
1805
+ :class:`GradientBoostingClassifier<sklearn.ensemble.GradientBoostingClassifier>`
1806
+ for big datasets (n_samples >= 10 000).
1807
+
1808
+ This estimator has native support for missing values (NaNs). During
1809
+ training, the tree grower learns at each split point whether samples
1810
+ with missing values should go to the left or right child, based on the
1811
+ potential gain. When predicting, samples with missing values are
1812
+ assigned to the left or right child accordingly. If no missing values
1813
+ were encountered for a given feature during training, then samples with
1814
+ missing values are mapped to whichever child has the most samples.
1815
+
1816
+ This implementation is inspired by
1817
+ `LightGBM <https://github.com/Microsoft/LightGBM>`_.
1818
+
1819
+ Read more in the :ref:`User Guide <histogram_based_gradient_boosting>`.
1820
+
1821
+ .. versionadded:: 0.21
1822
+
1823
+ Parameters
1824
+ ----------
1825
+ loss : {'log_loss'}, default='log_loss'
1826
+ The loss function to use in the boosting process.
1827
+
1828
+ For binary classification problems, 'log_loss' is also known as logistic loss,
1829
+ binomial deviance or binary crossentropy. Internally, the model fits one tree
1830
+ per boosting iteration and uses the logistic sigmoid function (expit) as
1831
+ inverse link function to compute the predicted positive class probability.
1832
+
1833
+ For multiclass classification problems, 'log_loss' is also known as multinomial
1834
+ deviance or categorical crossentropy. Internally, the model fits one tree per
1835
+ boosting iteration and per class and uses the softmax function as inverse link
1836
+ function to compute the predicted probabilities of the classes.
1837
+
1838
+ learning_rate : float, default=0.1
1839
+ The learning rate, also known as *shrinkage*. This is used as a
1840
+ multiplicative factor for the leaves values. Use ``1`` for no
1841
+ shrinkage.
1842
+ max_iter : int, default=100
1843
+ The maximum number of iterations of the boosting process, i.e. the
1844
+ maximum number of trees for binary classification. For multiclass
1845
+ classification, `n_classes` trees per iteration are built.
1846
+ max_leaf_nodes : int or None, default=31
1847
+ The maximum number of leaves for each tree. Must be strictly greater
1848
+ than 1. If None, there is no maximum limit.
1849
+ max_depth : int or None, default=None
1850
+ The maximum depth of each tree. The depth of a tree is the number of
1851
+ edges to go from the root to the deepest leaf.
1852
+ Depth isn't constrained by default.
1853
+ min_samples_leaf : int, default=20
1854
+ The minimum number of samples per leaf. For small datasets with less
1855
+ than a few hundred samples, it is recommended to lower this value
1856
+ since only very shallow trees would be built.
1857
+ l2_regularization : float, default=0
1858
+ The L2 regularization parameter. Use ``0`` for no regularization (default).
1859
+ max_features : float, default=1.0
1860
+ Proportion of randomly chosen features in each and every node split.
1861
+ This is a form of regularization; smaller values make the trees weaker
1862
+ learners and might prevent overfitting.
1863
+ If interaction constraints from `interaction_cst` are present, only allowed
1864
+ features are taken into account for the subsampling.
1865
+
1866
+ .. versionadded:: 1.4
1867
+
1868
+ max_bins : int, default=255
1869
+ The maximum number of bins to use for non-missing values. Before
1870
+ training, each feature of the input array `X` is binned into
1871
+ integer-valued bins, which allows for a much faster training stage.
1872
+ Features with a small number of unique values may use less than
1873
+ ``max_bins`` bins. In addition to the ``max_bins`` bins, one more bin
1874
+ is always reserved for missing values. Must be no larger than 255.
1875
+ categorical_features : array-like of {bool, int, str} of shape (n_features) \
1876
+ or shape (n_categorical_features,), default=None
1877
+ Indicates the categorical features.
1878
+
1879
+ - None : no feature will be considered categorical.
1880
+ - boolean array-like : boolean mask indicating categorical features.
1881
+ - integer array-like : integer indices indicating categorical
1882
+ features.
1883
+ - str array-like: names of categorical features (assuming the training
1884
+ data has feature names).
1885
+ - `"from_dtype"`: dataframe columns with dtype "category" are
1886
+ considered to be categorical features. The input must be an object
1887
+ exposing a ``__dataframe__`` method such as pandas or polars
1888
+ DataFrames to use this feature.
1889
+
1890
+ For each categorical feature, there must be at most `max_bins` unique
1891
+ categories. Negative values for categorical features encoded as numeric
1892
+ dtypes are treated as missing values. All categorical values are
1893
+ converted to floating point numbers. This means that categorical values
1894
+ of 1.0 and 1 are treated as the same category.
1895
+
1896
+ Read more in the :ref:`User Guide <categorical_support_gbdt>`.
1897
+
1898
+ .. versionadded:: 0.24
1899
+
1900
+ .. versionchanged:: 1.2
1901
+ Added support for feature names.
1902
+
1903
+ .. versionchanged:: 1.4
1904
+ Added `"from_dtype"` option. The default will change to `"from_dtype"` in
1905
+ v1.6.
1906
+
1907
+ monotonic_cst : array-like of int of shape (n_features) or dict, default=None
1908
+ Monotonic constraints to enforce on each feature are specified using the
1909
+ following integer values:
1910
+
1911
+ - 1: monotonic increase
1912
+ - 0: no constraint
1913
+ - -1: monotonic decrease
1914
+
1915
+ If a dict with str keys, map feature to monotonic constraints by name.
1916
+ If an array, the features are mapped to constraints by position. See
1917
+ :ref:`monotonic_cst_features_names` for a usage example.
1918
+
1919
+ The constraints are only valid for binary classification and hold
1920
+ over the probability of the positive class.
1921
+ Read more in the :ref:`User Guide <monotonic_cst_gbdt>`.
1922
+
1923
+ .. versionadded:: 0.23
1924
+
1925
+ .. versionchanged:: 1.2
1926
+ Accept dict of constraints with feature names as keys.
1927
+
1928
+ interaction_cst : {"pairwise", "no_interactions"} or sequence of lists/tuples/sets \
1929
+ of int, default=None
1930
+ Specify interaction constraints, the sets of features which can
1931
+ interact with each other in child node splits.
1932
+
1933
+ Each item specifies the set of feature indices that are allowed
1934
+ to interact with each other. If there are more features than
1935
+ specified in these constraints, they are treated as if they were
1936
+ specified as an additional set.
1937
+
1938
+ The strings "pairwise" and "no_interactions" are shorthands for
1939
+ allowing only pairwise or no interactions, respectively.
1940
+
1941
+ For instance, with 5 features in total, `interaction_cst=[{0, 1}]`
1942
+ is equivalent to `interaction_cst=[{0, 1}, {2, 3, 4}]`,
1943
+ and specifies that each branch of a tree will either only split
1944
+ on features 0 and 1 or only split on features 2, 3 and 4.
1945
+
1946
+ .. versionadded:: 1.2
1947
+
1948
+ warm_start : bool, default=False
1949
+ When set to ``True``, reuse the solution of the previous call to fit
1950
+ and add more estimators to the ensemble. For results to be valid, the
1951
+ estimator should be re-trained on the same data only.
1952
+ See :term:`the Glossary <warm_start>`.
1953
+ early_stopping : 'auto' or bool, default='auto'
1954
+ If 'auto', early stopping is enabled if the sample size is larger than
1955
+ 10000. If True, early stopping is enabled, otherwise early stopping is
1956
+ disabled.
1957
+
1958
+ .. versionadded:: 0.23
1959
+
1960
+ scoring : str or callable or None, default='loss'
1961
+ Scoring parameter to use for early stopping. It can be a single
1962
+ string (see :ref:`scoring_parameter`) or a callable (see
1963
+ :ref:`scoring`). If None, the estimator's default scorer
1964
+ is used. If ``scoring='loss'``, early stopping is checked
1965
+ w.r.t the loss value. Only used if early stopping is performed.
1966
+ validation_fraction : int or float or None, default=0.1
1967
+ Proportion (or absolute size) of training data to set aside as
1968
+ validation data for early stopping. If None, early stopping is done on
1969
+ the training data. Only used if early stopping is performed.
1970
+ n_iter_no_change : int, default=10
1971
+ Used to determine when to "early stop". The fitting process is
1972
+ stopped when none of the last ``n_iter_no_change`` scores are better
1973
+ than the ``n_iter_no_change - 1`` -th-to-last one, up to some
1974
+ tolerance. Only used if early stopping is performed.
1975
+ tol : float, default=1e-7
1976
+ The absolute tolerance to use when comparing scores. The higher the
1977
+ tolerance, the more likely we are to early stop: higher tolerance
1978
+ means that it will be harder for subsequent iterations to be
1979
+ considered an improvement upon the reference score.
1980
+ verbose : int, default=0
1981
+ The verbosity level. If not zero, print some information about the
1982
+ fitting process.
1983
+ random_state : int, RandomState instance or None, default=None
1984
+ Pseudo-random number generator to control the subsampling in the
1985
+ binning process, and the train/validation data split if early stopping
1986
+ is enabled.
1987
+ Pass an int for reproducible output across multiple function calls.
1988
+ See :term:`Glossary <random_state>`.
1989
+ class_weight : dict or 'balanced', default=None
1990
+ Weights associated with classes in the form `{class_label: weight}`.
1991
+ If not given, all classes are supposed to have weight one.
1992
+ The "balanced" mode uses the values of y to automatically adjust
1993
+ weights inversely proportional to class frequencies in the input data
1994
+ as `n_samples / (n_classes * np.bincount(y))`.
1995
+ Note that these weights will be multiplied with sample_weight (passed
1996
+ through the fit method) if `sample_weight` is specified.
1997
+
1998
+ .. versionadded:: 1.2
1999
+
2000
+ Attributes
2001
+ ----------
2002
+ classes_ : array, shape = (n_classes,)
2003
+ Class labels.
2004
+ do_early_stopping_ : bool
2005
+ Indicates whether early stopping is used during training.
2006
+ n_iter_ : int
2007
+ The number of iterations as selected by early stopping, depending on
2008
+ the `early_stopping` parameter. Otherwise it corresponds to max_iter.
2009
+ n_trees_per_iteration_ : int
2010
+ The number of trees that are built at each iteration. This is equal to 1
2011
+ for binary classification, and to ``n_classes`` for multiclass
2012
+ classification.
2013
+ train_score_ : ndarray, shape (n_iter_+1,)
2014
+ The scores at each iteration on the training data. The first entry
2015
+ is the score of the ensemble before the first iteration. Scores are
2016
+ computed according to the ``scoring`` parameter. If ``scoring`` is
2017
+ not 'loss', scores are computed on a subset of at most 10 000
2018
+ samples. Empty if no early stopping.
2019
+ validation_score_ : ndarray, shape (n_iter_+1,)
2020
+ The scores at each iteration on the held-out validation data. The
2021
+ first entry is the score of the ensemble before the first iteration.
2022
+ Scores are computed according to the ``scoring`` parameter. Empty if
2023
+ no early stopping or if ``validation_fraction`` is None.
2024
+ is_categorical_ : ndarray, shape (n_features, ) or None
2025
+ Boolean mask for the categorical features. ``None`` if there are no
2026
+ categorical features.
2027
+ n_features_in_ : int
2028
+ Number of features seen during :term:`fit`.
2029
+
2030
+ .. versionadded:: 0.24
2031
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
2032
+ Names of features seen during :term:`fit`. Defined only when `X`
2033
+ has feature names that are all strings.
2034
+
2035
+ .. versionadded:: 1.0
2036
+
2037
+ See Also
2038
+ --------
2039
+ GradientBoostingClassifier : Exact gradient boosting method that does not
2040
+ scale as well on datasets with a large number of samples.
2041
+ sklearn.tree.DecisionTreeClassifier : A decision tree classifier.
2042
+ RandomForestClassifier : A meta-estimator that fits a number of decision
2043
+ tree classifiers on various sub-samples of the dataset and uses
2044
+ averaging to improve the predictive accuracy and control over-fitting.
2045
+ AdaBoostClassifier : A meta-estimator that begins by fitting a classifier
2046
+ on the original dataset and then fits additional copies of the
2047
+ classifier on the same dataset where the weights of incorrectly
2048
+ classified instances are adjusted such that subsequent classifiers
2049
+ focus more on difficult cases.
2050
+
2051
+ Examples
2052
+ --------
2053
+ >>> from sklearn.ensemble import HistGradientBoostingClassifier
2054
+ >>> from sklearn.datasets import load_iris
2055
+ >>> X, y = load_iris(return_X_y=True)
2056
+ >>> clf = HistGradientBoostingClassifier().fit(X, y)
2057
+ >>> clf.score(X, y)
2058
+ 1.0
2059
+ """
2060
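+ # Editor's note: a minimal usage sketch of early stopping (illustrative only,
+ # not part of the upstream file; assumes scikit-learn is installed):
+ #
+ #     from sklearn.datasets import make_classification
+ #     from sklearn.ensemble import HistGradientBoostingClassifier
+ #
+ #     X, y = make_classification(n_samples=2000, random_state=0)
+ #     clf = HistGradientBoostingClassifier(
+ #         early_stopping=True, validation_fraction=0.2, n_iter_no_change=5,
+ #         random_state=0,
+ #     ).fit(X, y)
+ #     # n_iter_ may be < max_iter if the validation loss stopped improving;
+ #     # validation_score_ has one extra entry for the pre-boosting baseline.
+ #     print(clf.n_iter_, len(clf.validation_score_))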
+
2061
+ _parameter_constraints: dict = {
2062
+ **BaseHistGradientBoosting._parameter_constraints,
2063
+ "loss": [StrOptions({"log_loss"}), BaseLoss],
2064
+ "class_weight": [dict, StrOptions({"balanced"}), None],
2065
+ }
2066
+
2067
+ def __init__(
2068
+ self,
2069
+ loss="log_loss",
2070
+ *,
2071
+ learning_rate=0.1,
2072
+ max_iter=100,
2073
+ max_leaf_nodes=31,
2074
+ max_depth=None,
2075
+ min_samples_leaf=20,
2076
+ l2_regularization=0.0,
2077
+ max_features=1.0,
2078
+ max_bins=255,
2079
+ categorical_features="warn",
2080
+ monotonic_cst=None,
2081
+ interaction_cst=None,
2082
+ warm_start=False,
2083
+ early_stopping="auto",
2084
+ scoring="loss",
2085
+ validation_fraction=0.1,
2086
+ n_iter_no_change=10,
2087
+ tol=1e-7,
2088
+ verbose=0,
2089
+ random_state=None,
2090
+ class_weight=None,
2091
+ ):
2092
+ super(HistGradientBoostingClassifier, self).__init__(
2093
+ loss=loss,
2094
+ learning_rate=learning_rate,
2095
+ max_iter=max_iter,
2096
+ max_leaf_nodes=max_leaf_nodes,
2097
+ max_depth=max_depth,
2098
+ min_samples_leaf=min_samples_leaf,
2099
+ l2_regularization=l2_regularization,
2100
+ max_features=max_features,
2101
+ max_bins=max_bins,
2102
+ categorical_features=categorical_features,
2103
+ monotonic_cst=monotonic_cst,
2104
+ interaction_cst=interaction_cst,
2105
+ warm_start=warm_start,
2106
+ early_stopping=early_stopping,
2107
+ scoring=scoring,
2108
+ validation_fraction=validation_fraction,
2109
+ n_iter_no_change=n_iter_no_change,
2110
+ tol=tol,
2111
+ verbose=verbose,
2112
+ random_state=random_state,
2113
+ )
2114
+ self.class_weight = class_weight
2115
+
2116
+ def _finalize_sample_weight(self, sample_weight, y):
2117
+ """Adjust sample_weights with class_weights."""
2118
+ if self.class_weight is None:
2119
+ return sample_weight
2120
+
2121
+ expanded_class_weight = compute_sample_weight(self.class_weight, y)
2122
+
2123
+ if sample_weight is not None:
2124
+ return sample_weight * expanded_class_weight
2125
+ else:
2126
+ return expanded_class_weight
2127
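+ # Editor's note (illustrative worked example, not upstream code): with
+ # class_weight="balanced" and y = [0, 0, 0, 1], compute_sample_weight uses
+ # n_samples / (n_classes * np.bincount(y)), i.e. 4 / (2 * 3) ≈ 0.67 for the
+ # three class-0 samples and 4 / (2 * 1) = 2.0 for the class-1 sample; these
+ # factors are then multiplied into any user-provided sample_weight.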
+
2128
+ def predict(self, X):
2129
+ """Predict classes for X.
2130
+
2131
+ Parameters
2132
+ ----------
2133
+ X : array-like, shape (n_samples, n_features)
2134
+ The input samples.
2135
+
2136
+ Returns
2137
+ -------
2138
+ y : ndarray, shape (n_samples,)
2139
+ The predicted classes.
2140
+ """
2141
+ # TODO: This could be done in parallel
2142
+ encoded_classes = np.argmax(self.predict_proba(X), axis=1)
2143
+ return self.classes_[encoded_classes]
2144
+
2145
+ def staged_predict(self, X):
2146
+ """Predict classes at each iteration.
2147
+
2148
+ This method allows monitoring (i.e. determine error on testing set)
2149
+ after each stage.
2150
+
2151
+ .. versionadded:: 0.24
2152
+
2153
+ Parameters
2154
+ ----------
2155
+ X : array-like of shape (n_samples, n_features)
2156
+ The input samples.
2157
+
2158
+ Yields
2159
+ ------
2160
+ y : generator of ndarray of shape (n_samples,)
2161
+ The predicted classes of the input samples, for each iteration.
2162
+ """
2163
+ for proba in self.staged_predict_proba(X):
2164
+ encoded_classes = np.argmax(proba, axis=1)
2165
+ yield self.classes_.take(encoded_classes, axis=0)
2166
+
2167
+ def predict_proba(self, X):
2168
+ """Predict class probabilities for X.
2169
+
2170
+ Parameters
2171
+ ----------
2172
+ X : array-like, shape (n_samples, n_features)
2173
+ The input samples.
2174
+
2175
+ Returns
2176
+ -------
2177
+ p : ndarray, shape (n_samples, n_classes)
2178
+ The class probabilities of the input samples.
2179
+ """
2180
+ raw_predictions = self._raw_predict(X)
2181
+ return self._loss.predict_proba(raw_predictions)
2182
+
2183
+ def staged_predict_proba(self, X):
2184
+ """Predict class probabilities at each iteration.
2185
+
2186
+ This method allows monitoring (i.e. determining the error on the test set)
2187
+ after each stage.
2188
+
2189
+ Parameters
2190
+ ----------
2191
+ X : array-like of shape (n_samples, n_features)
2192
+ The input samples.
2193
+
2194
+ Yields
2195
+ ------
2196
+ y : generator of ndarray of shape (n_samples,)
2197
+ The predicted class probabilities of the input samples,
2198
+ for each iteration.
2199
+ """
2200
+ for raw_predictions in self._staged_raw_predict(X):
2201
+ yield self._loss.predict_proba(raw_predictions)
2202
+
2203
+ def decision_function(self, X):
2204
+ """Compute the decision function of ``X``.
2205
+
2206
+ Parameters
2207
+ ----------
2208
+ X : array-like, shape (n_samples, n_features)
2209
+ The input samples.
2210
+
2211
+ Returns
2212
+ -------
2213
+ decision : ndarray, shape (n_samples,) or \
2214
+ (n_samples, n_trees_per_iteration)
2215
+ The raw predicted values (i.e. the sum of the trees leaves) for
2216
+ each sample. n_trees_per_iteration is equal to the number of
2217
+ classes in multiclass classification.
2218
+ """
2219
+ decision = self._raw_predict(X)
2220
+ if decision.shape[1] == 1:
2221
+ decision = decision.ravel()
2222
+ return decision
2223
+
2224
+ def staged_decision_function(self, X):
2225
+ """Compute decision function of ``X`` for each iteration.
2226
+
2227
+ This method allows monitoring (i.e. determining the error on the test set)
2228
+ after each stage.
2229
+
2230
+ Parameters
2231
+ ----------
2232
+ X : array-like of shape (n_samples, n_features)
2233
+ The input samples.
2234
+
2235
+ Yields
2236
+ ------
2237
+ decision : generator of ndarray of shape (n_samples,) or \
2238
+ (n_samples, n_trees_per_iteration)
2239
+ The decision function of the input samples, which corresponds to
2240
+ the raw values predicted from the trees of the ensemble . The
2241
+ classes corresponds to that in the attribute :term:`classes_`.
2242
+ """
2243
+ for staged_decision in self._staged_raw_predict(X):
2244
+ if staged_decision.shape[1] == 1:
2245
+ staged_decision = staged_decision.ravel()
2246
+ yield staged_decision
2247
+
2248
+ def _encode_y(self, y):
2249
+ # Encode classes into 0 ... n_classes - 1 and set the attributes classes_
2250
+ # and n_trees_per_iteration_
2251
+ check_classification_targets(y)
2252
+
2253
+ label_encoder = LabelEncoder()
2254
+ encoded_y = label_encoder.fit_transform(y)
2255
+ self.classes_ = label_encoder.classes_
2256
+ n_classes = self.classes_.shape[0]
2257
+ # only 1 tree for binary classification. For multiclass classification,
2258
+ # we build 1 tree per class.
2259
+ self.n_trees_per_iteration_ = 1 if n_classes <= 2 else n_classes
2260
+ encoded_y = encoded_y.astype(Y_DTYPE, copy=False)
2261
+ return encoded_y
2262
+
2263
+ def _get_loss(self, sample_weight):
2264
+ # At this point self.loss == "log_loss"
2265
+ if self.n_trees_per_iteration_ == 1:
2266
+ return HalfBinomialLoss(sample_weight=sample_weight)
2267
+ else:
2268
+ return HalfMultinomialLoss(
2269
+ sample_weight=sample_weight, n_classes=self.n_trees_per_iteration_
2270
+ )
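For readers skimming this vendored file, here is a minimal, illustrative usage sketch (not part of the upstream diff). It exercises the `class_weight="balanced"` handling done by `_finalize_sample_weight` and the staged prediction methods defined above; the synthetic dataset and hyperparameter values are arbitrary.

```python
# Illustrative only: class_weight reweighting and per-iteration monitoring
# with staged_predict_proba. Dataset shape and parameters are arbitrary.
from sklearn.datasets import make_classification
from sklearn.ensemble import HistGradientBoostingClassifier
from sklearn.metrics import log_loss

X, y = make_classification(n_samples=2000, weights=[0.9, 0.1], random_state=0)

clf = HistGradientBoostingClassifier(
    class_weight="balanced", max_iter=50, random_state=0
).fit(X, y)

# staged_predict_proba yields one probability array per boosting iteration,
# which makes it easy to monitor the training loss after each stage.
staged_losses = [log_loss(y, proba) for proba in clf.staged_predict_proba(X)]
print(f"{clf.n_iter_} iterations, final train log-loss: {staged_losses[-1]:.4f}")
```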
venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/histogram.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (328 kB). View file
 
venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/predictor.py ADDED
@@ -0,0 +1,144 @@
1
+ """
2
+ This module contains the TreePredictor class which is used for prediction.
3
+ """
4
+ # Author: Nicolas Hug
5
+
6
+ import numpy as np
7
+
8
+ from ._predictor import (
9
+ _compute_partial_dependence,
10
+ _predict_from_binned_data,
11
+ _predict_from_raw_data,
12
+ )
13
+ from .common import PREDICTOR_RECORD_DTYPE, Y_DTYPE
14
+
15
+
16
+ class TreePredictor:
17
+ """Tree class used for predictions.
18
+
19
+ Parameters
20
+ ----------
21
+ nodes : ndarray of PREDICTOR_RECORD_DTYPE
22
+ The nodes of the tree.
23
+ binned_left_cat_bitsets : ndarray of shape (n_categorical_splits, 8), dtype=uint32
24
+ Array of bitsets for binned categories used in predict_binned when a
25
+ split is categorical.
26
+ raw_left_cat_bitsets : ndarray of shape (n_categorical_splits, 8), dtype=uint32
27
+ Array of bitsets for raw categories used in predict when a split is
28
+ categorical.
29
+ """
30
+
31
+ def __init__(self, nodes, binned_left_cat_bitsets, raw_left_cat_bitsets):
32
+ self.nodes = nodes
33
+ self.binned_left_cat_bitsets = binned_left_cat_bitsets
34
+ self.raw_left_cat_bitsets = raw_left_cat_bitsets
35
+
36
+ def get_n_leaf_nodes(self):
37
+ """Return number of leaves."""
38
+ return int(self.nodes["is_leaf"].sum())
39
+
40
+ def get_max_depth(self):
41
+ """Return maximum depth among all leaves."""
42
+ return int(self.nodes["depth"].max())
43
+
44
+ def predict(self, X, known_cat_bitsets, f_idx_map, n_threads):
45
+ """Predict raw values for non-binned data.
46
+
47
+ Parameters
48
+ ----------
49
+ X : ndarray, shape (n_samples, n_features)
50
+ The input samples.
51
+
52
+ known_cat_bitsets : ndarray of shape (n_categorical_features, 8)
53
+ Array of bitsets of known categories, for each categorical feature.
54
+
55
+ f_idx_map : ndarray of shape (n_features,)
56
+ Map from original feature index to the corresponding index in the
57
+ known_cat_bitsets array.
58
+
59
+ n_threads : int
60
+ Number of OpenMP threads to use.
61
+
62
+ Returns
63
+ -------
64
+ y : ndarray, shape (n_samples,)
65
+ The raw predicted values.
66
+ """
67
+ out = np.empty(X.shape[0], dtype=Y_DTYPE)
68
+
69
+ _predict_from_raw_data(
70
+ self.nodes,
71
+ X,
72
+ self.raw_left_cat_bitsets,
73
+ known_cat_bitsets,
74
+ f_idx_map,
75
+ n_threads,
76
+ out,
77
+ )
78
+ return out
79
+
80
+ def predict_binned(self, X, missing_values_bin_idx, n_threads):
81
+ """Predict raw values for binned data.
82
+
83
+ Parameters
84
+ ----------
85
+ X : ndarray, shape (n_samples, n_features)
86
+ The input samples.
87
+ missing_values_bin_idx : uint8
88
+ Index of the bin that is used for missing values. This is the
89
+ index of the last bin and is always equal to max_bins (as passed
90
+ to the GBDT classes), or equivalently to n_bins - 1.
91
+ n_threads : int
92
+ Number of OpenMP threads to use.
93
+
94
+ Returns
95
+ -------
96
+ y : ndarray, shape (n_samples,)
97
+ The raw predicted values.
98
+ """
99
+ out = np.empty(X.shape[0], dtype=Y_DTYPE)
100
+ _predict_from_binned_data(
101
+ self.nodes,
102
+ X,
103
+ self.binned_left_cat_bitsets,
104
+ missing_values_bin_idx,
105
+ n_threads,
106
+ out,
107
+ )
108
+ return out
109
+
110
+ def compute_partial_dependence(self, grid, target_features, out):
111
+ """Fast partial dependence computation.
112
+
113
+ Parameters
114
+ ----------
115
+ grid : ndarray, shape (n_samples, n_target_features)
116
+ The grid points on which the partial dependence should be
117
+ evaluated.
118
+ target_features : ndarray, shape (n_target_features)
119
+ The set of target features for which the partial dependence
120
+ should be evaluated.
121
+ out : ndarray, shape (n_samples)
122
+ The value of the partial dependence function on each grid
123
+ point.
124
+ """
125
+ _compute_partial_dependence(self.nodes, grid, target_features, out)
126
+
127
+ def __setstate__(self, state):
128
+ try:
129
+ super().__setstate__(state)
130
+ except AttributeError:
131
+ self.__dict__.update(state)
132
+
133
+ # The dtype of feature_idx is np.intp which is platform dependent. Here, we
134
+ # make sure that saving and loading on different bitness systems works without
135
+ # errors. For instance, on a 64 bit Python runtime, np.intp = np.int64,
136
+ # while on 32 bit np.intp = np.int32.
137
+ #
138
+ # TODO: consider always using platform agnostic dtypes for fitted
139
+ # estimator attributes. For this particular estimator, this would
140
+ # mean replacing the intp field of PREDICTOR_RECORD_DTYPE by an int32
141
+ # field. Ideally this should be done consistently throughout
142
+ # scikit-learn along with a common test.
143
+ if self.nodes.dtype != PREDICTOR_RECORD_DTYPE:
144
+ self.nodes = self.nodes.astype(PREDICTOR_RECORD_DTYPE, casting="same_kind")
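The Cython routines imported above do the actual traversal; as a rough mental model only, the sketch below routes a single sample through a toy structured node array in pure Python. Only the `is_leaf` field is taken from the file above; the other field names (`value`, `feature_idx`, `num_threshold`, `left`, `right`) are assumptions made for illustration and are not the real `PREDICTOR_RECORD_DTYPE`.

```python
import numpy as np

# Toy node record for illustration only; the real PREDICTOR_RECORD_DTYPE has
# more fields (missing-value routing, categorical bitsets, gain, count, ...).
TOY_NODE_DTYPE = np.dtype(
    [
        ("value", np.float64),
        ("feature_idx", np.intp),
        ("num_threshold", np.float64),
        ("left", np.uint32),
        ("right", np.uint32),
        ("is_leaf", np.uint8),
    ]
)


def predict_one(nodes, x):
    """Route one sample through the node array (numerical splits only)."""
    node = nodes[0]  # the root is assumed to be stored first
    while not node["is_leaf"]:
        if x[node["feature_idx"]] <= node["num_threshold"]:
            node = nodes[node["left"]]
        else:
            node = nodes[node["right"]]
    return node["value"]


# A depth-1 toy tree that splits on feature 0 at threshold 0.5.
nodes = np.zeros(3, dtype=TOY_NODE_DTYPE)
nodes[0] = (0.0, 0, 0.5, 1, 2, 0)   # root
nodes[1] = (-1.0, 0, 0.0, 0, 0, 1)  # left leaf
nodes[2] = (1.0, 0, 0.0, 0, 0, 1)   # right leaf
print(predict_one(nodes, np.array([0.2])))  # -1.0
print(predict_one(nodes, np.array([0.9])))  # 1.0
```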
venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/splitting.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (369 kB). View file
 
venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__init__.py ADDED
File without changes
venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (214 Bytes). View file
 
venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_binning.cpython-310.pyc ADDED
Binary file (13 kB). View file
 
venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_bitset.cpython-310.pyc ADDED
Binary file (1.87 kB). View file
 
venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_compare_lightgbm.cpython-310.pyc ADDED
Binary file (4.62 kB). View file
 
venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_gradient_boosting.cpython-310.pyc ADDED
Binary file (38.2 kB). View file
 
venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_grower.cpython-310.pyc ADDED
Binary file (13.8 kB). View file
 
venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_histogram.cpython-310.pyc ADDED
Binary file (4.79 kB). View file
 
venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_monotonic_contraints.cpython-310.pyc ADDED
Binary file (8.8 kB). View file
 
venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_predictor.cpython-310.pyc ADDED
Binary file (4.52 kB). View file
 
venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_splitting.cpython-310.pyc ADDED
Binary file (15.6 kB). View file
 
venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_warm_start.cpython-310.pyc ADDED
Binary file (4.7 kB). View file
 
venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/test_binning.py ADDED
@@ -0,0 +1,489 @@
1
+ import numpy as np
2
+ import pytest
3
+ from numpy.testing import assert_allclose, assert_array_equal
4
+
5
+ from sklearn.ensemble._hist_gradient_boosting.binning import (
6
+ _BinMapper,
7
+ _find_binning_thresholds,
8
+ _map_to_bins,
9
+ )
10
+ from sklearn.ensemble._hist_gradient_boosting.common import (
11
+ ALMOST_INF,
12
+ X_BINNED_DTYPE,
13
+ X_DTYPE,
14
+ )
15
+ from sklearn.utils._openmp_helpers import _openmp_effective_n_threads
16
+
17
+ n_threads = _openmp_effective_n_threads()
18
+
19
+
20
+ DATA = (
21
+ np.random.RandomState(42)
22
+ .normal(loc=[0, 10], scale=[1, 0.01], size=(int(1e6), 2))
23
+ .astype(X_DTYPE)
24
+ )
25
+
26
+
27
+ def test_find_binning_thresholds_regular_data():
28
+ data = np.linspace(0, 10, 1001)
29
+ bin_thresholds = _find_binning_thresholds(data, max_bins=10)
30
+ assert_allclose(bin_thresholds, [1, 2, 3, 4, 5, 6, 7, 8, 9])
31
+
32
+ bin_thresholds = _find_binning_thresholds(data, max_bins=5)
33
+ assert_allclose(bin_thresholds, [2, 4, 6, 8])
34
+
35
+
36
+ def test_find_binning_thresholds_small_regular_data():
37
+ data = np.linspace(0, 10, 11)
38
+
39
+ bin_thresholds = _find_binning_thresholds(data, max_bins=5)
40
+ assert_allclose(bin_thresholds, [2, 4, 6, 8])
41
+
42
+ bin_thresholds = _find_binning_thresholds(data, max_bins=10)
43
+ assert_allclose(bin_thresholds, [1, 2, 3, 4, 5, 6, 7, 8, 9])
44
+
45
+ bin_thresholds = _find_binning_thresholds(data, max_bins=11)
46
+ assert_allclose(bin_thresholds, np.arange(10) + 0.5)
47
+
48
+ bin_thresholds = _find_binning_thresholds(data, max_bins=255)
49
+ assert_allclose(bin_thresholds, np.arange(10) + 0.5)
50
+
51
+
52
+ def test_find_binning_thresholds_random_data():
53
+ bin_thresholds = [
54
+ _find_binning_thresholds(DATA[:, i], max_bins=255) for i in range(2)
55
+ ]
56
+ for i in range(len(bin_thresholds)):
57
+ assert bin_thresholds[i].shape == (254,) # 255 - 1
58
+ assert bin_thresholds[i].dtype == DATA.dtype
59
+
60
+ assert_allclose(
61
+ bin_thresholds[0][[64, 128, 192]], np.array([-0.7, 0.0, 0.7]), atol=1e-1
62
+ )
63
+
64
+ assert_allclose(
65
+ bin_thresholds[1][[64, 128, 192]], np.array([9.99, 10.00, 10.01]), atol=1e-2
66
+ )
67
+
68
+
69
+ def test_find_binning_thresholds_low_n_bins():
70
+ bin_thresholds = [
71
+ _find_binning_thresholds(DATA[:, i], max_bins=128) for i in range(2)
72
+ ]
73
+ for i in range(len(bin_thresholds)):
74
+ assert bin_thresholds[i].shape == (127,) # 128 - 1
75
+ assert bin_thresholds[i].dtype == DATA.dtype
76
+
77
+
78
+ @pytest.mark.parametrize("n_bins", (2, 257))
79
+ def test_invalid_n_bins(n_bins):
80
+ err_msg = "n_bins={} should be no smaller than 3 and no larger than 256".format(
81
+ n_bins
82
+ )
83
+ with pytest.raises(ValueError, match=err_msg):
84
+ _BinMapper(n_bins=n_bins).fit(DATA)
85
+
86
+
87
+ def test_bin_mapper_n_features_transform():
88
+ mapper = _BinMapper(n_bins=42, random_state=42).fit(DATA)
89
+ err_msg = "This estimator was fitted with 2 features but 4 got passed"
90
+ with pytest.raises(ValueError, match=err_msg):
91
+ mapper.transform(np.repeat(DATA, 2, axis=1))
92
+
93
+
94
+ @pytest.mark.parametrize("max_bins", [16, 128, 255])
95
+ def test_map_to_bins(max_bins):
96
+ bin_thresholds = [
97
+ _find_binning_thresholds(DATA[:, i], max_bins=max_bins) for i in range(2)
98
+ ]
99
+ binned = np.zeros_like(DATA, dtype=X_BINNED_DTYPE, order="F")
100
+ is_categorical = np.zeros(2, dtype=np.uint8)
101
+ last_bin_idx = max_bins
102
+ _map_to_bins(DATA, bin_thresholds, is_categorical, last_bin_idx, n_threads, binned)
103
+ assert binned.shape == DATA.shape
104
+ assert binned.dtype == np.uint8
105
+ assert binned.flags.f_contiguous
106
+
107
+ min_indices = DATA.argmin(axis=0)
108
+ max_indices = DATA.argmax(axis=0)
109
+
110
+ for feature_idx, min_idx in enumerate(min_indices):
111
+ assert binned[min_idx, feature_idx] == 0
112
+ for feature_idx, max_idx in enumerate(max_indices):
113
+ assert binned[max_idx, feature_idx] == max_bins - 1
114
+
115
+
116
+ @pytest.mark.parametrize("max_bins", [5, 10, 42])
117
+ def test_bin_mapper_random_data(max_bins):
118
+ n_samples, n_features = DATA.shape
119
+
120
+ expected_count_per_bin = n_samples // max_bins
121
+ tol = int(0.05 * expected_count_per_bin)
122
+
123
+ # max_bins is the number of bins for non-missing values
124
+ n_bins = max_bins + 1
125
+ mapper = _BinMapper(n_bins=n_bins, random_state=42).fit(DATA)
126
+ binned = mapper.transform(DATA)
127
+
128
+ assert binned.shape == (n_samples, n_features)
129
+ assert binned.dtype == np.uint8
130
+ assert_array_equal(binned.min(axis=0), np.array([0, 0]))
131
+ assert_array_equal(binned.max(axis=0), np.array([max_bins - 1, max_bins - 1]))
132
+ assert len(mapper.bin_thresholds_) == n_features
133
+ for bin_thresholds_feature in mapper.bin_thresholds_:
134
+ assert bin_thresholds_feature.shape == (max_bins - 1,)
135
+ assert bin_thresholds_feature.dtype == DATA.dtype
136
+ assert np.all(mapper.n_bins_non_missing_ == max_bins)
137
+
138
+ # Check that the binned data is approximately balanced across bins.
139
+ for feature_idx in range(n_features):
140
+ for bin_idx in range(max_bins):
141
+ count = (binned[:, feature_idx] == bin_idx).sum()
142
+ assert abs(count - expected_count_per_bin) < tol
143
+
144
+
145
+ @pytest.mark.parametrize("n_samples, max_bins", [(5, 5), (5, 10), (5, 11), (42, 255)])
146
+ def test_bin_mapper_small_random_data(n_samples, max_bins):
147
+ data = np.random.RandomState(42).normal(size=n_samples).reshape(-1, 1)
148
+ assert len(np.unique(data)) == n_samples
149
+
150
+ # max_bins is the number of bins for non-missing values
151
+ n_bins = max_bins + 1
152
+ mapper = _BinMapper(n_bins=n_bins, random_state=42)
153
+ binned = mapper.fit_transform(data)
154
+
155
+ assert binned.shape == data.shape
156
+ assert binned.dtype == np.uint8
157
+ assert_array_equal(binned.ravel()[np.argsort(data.ravel())], np.arange(n_samples))
158
+
159
+
160
+ @pytest.mark.parametrize(
161
+ "max_bins, n_distinct, multiplier",
162
+ [
163
+ (5, 5, 1),
164
+ (5, 5, 3),
165
+ (255, 12, 42),
166
+ ],
167
+ )
168
+ def test_bin_mapper_identity_repeated_values(max_bins, n_distinct, multiplier):
169
+ data = np.array(list(range(n_distinct)) * multiplier).reshape(-1, 1)
170
+ # max_bins is the number of bins for non-missing values
171
+ n_bins = max_bins + 1
172
+ binned = _BinMapper(n_bins=n_bins).fit_transform(data)
173
+ assert_array_equal(data, binned)
174
+
175
+
176
+ @pytest.mark.parametrize("n_distinct", [2, 7, 42])
177
+ def test_bin_mapper_repeated_values_invariance(n_distinct):
178
+ rng = np.random.RandomState(42)
179
+ distinct_values = rng.normal(size=n_distinct)
180
+ assert len(np.unique(distinct_values)) == n_distinct
181
+
182
+ repeated_indices = rng.randint(low=0, high=n_distinct, size=1000)
183
+ data = distinct_values[repeated_indices]
184
+ rng.shuffle(data)
185
+ assert_array_equal(np.unique(data), np.sort(distinct_values))
186
+
187
+ data = data.reshape(-1, 1)
188
+
189
+ mapper_1 = _BinMapper(n_bins=n_distinct + 1)
190
+ binned_1 = mapper_1.fit_transform(data)
191
+ assert_array_equal(np.unique(binned_1[:, 0]), np.arange(n_distinct))
192
+
193
+ # Adding more bins to the mapper yields the same results (same thresholds)
194
+ mapper_2 = _BinMapper(n_bins=min(256, n_distinct * 3) + 1)
195
+ binned_2 = mapper_2.fit_transform(data)
196
+
197
+ assert_allclose(mapper_1.bin_thresholds_[0], mapper_2.bin_thresholds_[0])
198
+ assert_array_equal(binned_1, binned_2)
199
+
200
+
201
+ @pytest.mark.parametrize(
202
+ "max_bins, scale, offset",
203
+ [
204
+ (3, 2, -1),
205
+ (42, 1, 0),
206
+ (255, 0.3, 42),
207
+ ],
208
+ )
209
+ def test_bin_mapper_identity_small(max_bins, scale, offset):
210
+ data = np.arange(max_bins).reshape(-1, 1) * scale + offset
211
+ # max_bins is the number of bins for non-missing values
212
+ n_bins = max_bins + 1
213
+ binned = _BinMapper(n_bins=n_bins).fit_transform(data)
214
+ assert_array_equal(binned, np.arange(max_bins).reshape(-1, 1))
215
+
216
+
217
+ @pytest.mark.parametrize(
218
+ "max_bins_small, max_bins_large",
219
+ [
220
+ (2, 2),
221
+ (3, 3),
222
+ (4, 4),
223
+ (42, 42),
224
+ (255, 255),
225
+ (5, 17),
226
+ (42, 255),
227
+ ],
228
+ )
229
+ def test_bin_mapper_idempotence(max_bins_small, max_bins_large):
230
+ assert max_bins_large >= max_bins_small
231
+ data = np.random.RandomState(42).normal(size=30000).reshape(-1, 1)
232
+ mapper_small = _BinMapper(n_bins=max_bins_small + 1)
233
+ mapper_large = _BinMapper(n_bins=max_bins_small + 1)
234
+ binned_small = mapper_small.fit_transform(data)
235
+ binned_large = mapper_large.fit_transform(binned_small)
236
+ assert_array_equal(binned_small, binned_large)
237
+
238
+
239
+ @pytest.mark.parametrize("n_bins", [10, 100, 256])
240
+ @pytest.mark.parametrize("diff", [-5, 0, 5])
241
+ def test_n_bins_non_missing(n_bins, diff):
242
+ # Check that n_bins_non_missing_ equals n_unique_values when
243
+ # there are few unique values, and n_bins - 1 otherwise.
244
+
245
+ n_unique_values = n_bins + diff
246
+ X = list(range(n_unique_values)) * 2
247
+ X = np.array(X).reshape(-1, 1)
248
+ mapper = _BinMapper(n_bins=n_bins).fit(X)
249
+ assert np.all(mapper.n_bins_non_missing_ == min(n_bins - 1, n_unique_values))
250
+
251
+
252
+ def test_subsample():
253
+ # Make sure bin thresholds are different when applying subsampling
254
+ mapper_no_subsample = _BinMapper(subsample=None, random_state=0).fit(DATA)
255
+ mapper_subsample = _BinMapper(subsample=256, random_state=0).fit(DATA)
256
+
257
+ for feature in range(DATA.shape[1]):
258
+ assert not np.allclose(
259
+ mapper_no_subsample.bin_thresholds_[feature],
260
+ mapper_subsample.bin_thresholds_[feature],
261
+ rtol=1e-4,
262
+ )
263
+
264
+
265
+ @pytest.mark.parametrize(
266
+ "n_bins, n_bins_non_missing, X_trans_expected",
267
+ [
268
+ (
269
+ 256,
270
+ [4, 2, 2],
271
+ [
272
+ [0, 0, 0], # 255 <=> missing value
273
+ [255, 255, 0],
274
+ [1, 0, 0],
275
+ [255, 1, 1],
276
+ [2, 1, 1],
277
+ [3, 0, 0],
278
+ ],
279
+ ),
280
+ (
281
+ 3,
282
+ [2, 2, 2],
283
+ [
284
+ [0, 0, 0], # 2 <=> missing value
285
+ [2, 2, 0],
286
+ [0, 0, 0],
287
+ [2, 1, 1],
288
+ [1, 1, 1],
289
+ [1, 0, 0],
290
+ ],
291
+ ),
292
+ ],
293
+ )
294
+ def test_missing_values_support(n_bins, n_bins_non_missing, X_trans_expected):
295
+ # check for missing values: make sure nans are mapped to the last bin
296
+ # and that the _BinMapper attributes are correct
297
+
298
+ X = [
299
+ [1, 1, 0],
300
+ [np.nan, np.nan, 0],
301
+ [2, 1, 0],
302
+ [np.nan, 2, 1],
303
+ [3, 2, 1],
304
+ [4, 1, 0],
305
+ ]
306
+
307
+ X = np.array(X)
308
+
309
+ mapper = _BinMapper(n_bins=n_bins)
310
+ mapper.fit(X)
311
+
312
+ assert_array_equal(mapper.n_bins_non_missing_, n_bins_non_missing)
313
+
314
+ for feature_idx in range(X.shape[1]):
315
+ assert (
316
+ len(mapper.bin_thresholds_[feature_idx])
317
+ == n_bins_non_missing[feature_idx] - 1
318
+ )
319
+
320
+ assert mapper.missing_values_bin_idx_ == n_bins - 1
321
+
322
+ X_trans = mapper.transform(X)
323
+ assert_array_equal(X_trans, X_trans_expected)
324
+
325
+
326
+ def test_infinite_values():
327
+ # Make sure infinite values are properly handled.
328
+ bin_mapper = _BinMapper()
329
+
330
+ X = np.array([-np.inf, 0, 1, np.inf]).reshape(-1, 1)
331
+
332
+ bin_mapper.fit(X)
333
+ assert_allclose(bin_mapper.bin_thresholds_[0], [-np.inf, 0.5, ALMOST_INF])
334
+ assert bin_mapper.n_bins_non_missing_ == [4]
335
+
336
+ expected_binned_X = np.array([0, 1, 2, 3]).reshape(-1, 1)
337
+ assert_array_equal(bin_mapper.transform(X), expected_binned_X)
338
+
339
+
340
+ @pytest.mark.parametrize("n_bins", [15, 256])
341
+ def test_categorical_feature(n_bins):
342
+ # Basic test for categorical features
343
+ # we make sure that categories are mapped into [0, n_categories - 1] and
344
+ # that nans are mapped to the last bin
345
+ X = np.array(
346
+ [[4] * 500 + [1] * 3 + [10] * 4 + [0] * 4 + [13] + [7] * 5 + [np.nan] * 2],
347
+ dtype=X_DTYPE,
348
+ ).T
349
+ known_categories = [np.unique(X[~np.isnan(X)])]
350
+
351
+ bin_mapper = _BinMapper(
352
+ n_bins=n_bins,
353
+ is_categorical=np.array([True]),
354
+ known_categories=known_categories,
355
+ ).fit(X)
356
+ assert bin_mapper.n_bins_non_missing_ == [6]
357
+ assert_array_equal(bin_mapper.bin_thresholds_[0], [0, 1, 4, 7, 10, 13])
358
+
359
+ X = np.array([[0, 1, 4, np.nan, 7, 10, 13]], dtype=X_DTYPE).T
360
+ expected_trans = np.array([[0, 1, 2, n_bins - 1, 3, 4, 5]]).T
361
+ assert_array_equal(bin_mapper.transform(X), expected_trans)
362
+
363
+ # Negative categories are mapped to the missing values' bin
364
+ # (i.e. the bin of index `missing_values_bin_idx_`, which equals n_bins - 1).
365
+ # Unknown positive categories do not happen in practice and are tested
366
+ # for illustration purposes.
367
+ X = np.array([[-4, -1, 100]], dtype=X_DTYPE).T
368
+ expected_trans = np.array([[n_bins - 1, n_bins - 1, 6]]).T
369
+ assert_array_equal(bin_mapper.transform(X), expected_trans)
370
+
371
+
372
+ def test_categorical_feature_negative_missing():
373
+ """Make sure bin mapper treats negative categories as missing values."""
374
+ X = np.array(
375
+ [[4] * 500 + [1] * 3 + [5] * 10 + [-1] * 3 + [np.nan] * 4], dtype=X_DTYPE
376
+ ).T
377
+ bin_mapper = _BinMapper(
378
+ n_bins=4,
379
+ is_categorical=np.array([True]),
380
+ known_categories=[np.array([1, 4, 5], dtype=X_DTYPE)],
381
+ ).fit(X)
382
+
383
+ assert bin_mapper.n_bins_non_missing_ == [3]
384
+
385
+ X = np.array([[-1, 1, 3, 5, np.nan]], dtype=X_DTYPE).T
386
+
387
+ # Negative values for categorical features are considered as missing values.
388
+ # They are mapped to the bin of index `bin_mapper.missing_values_bin_idx_`,
389
+ # which is 3 here.
390
+ assert bin_mapper.missing_values_bin_idx_ == 3
391
+ expected_trans = np.array([[3, 0, 1, 2, 3]]).T
392
+ assert_array_equal(bin_mapper.transform(X), expected_trans)
393
+
394
+
395
+ @pytest.mark.parametrize("n_bins", (128, 256))
396
+ def test_categorical_with_numerical_features(n_bins):
397
+ # Basic check for _BinMapper with mixed numerical and categorical data
398
+ X1 = np.arange(10, 20).reshape(-1, 1) # numerical
399
+ X2 = np.arange(10, 15).reshape(-1, 1) # categorical
400
+ X2 = np.r_[X2, X2]
401
+ X = np.c_[X1, X2]
402
+ known_categories = [None, np.unique(X2).astype(X_DTYPE)]
403
+
404
+ bin_mapper = _BinMapper(
405
+ n_bins=n_bins,
406
+ is_categorical=np.array([False, True]),
407
+ known_categories=known_categories,
408
+ ).fit(X)
409
+
410
+ assert_array_equal(bin_mapper.n_bins_non_missing_, [10, 5])
411
+
412
+ bin_thresholds = bin_mapper.bin_thresholds_
413
+ assert len(bin_thresholds) == 2
414
+ assert_array_equal(bin_thresholds[1], np.arange(10, 15))
415
+
416
+ expected_X_trans = [
417
+ [0, 0],
418
+ [1, 1],
419
+ [2, 2],
420
+ [3, 3],
421
+ [4, 4],
422
+ [5, 0],
423
+ [6, 1],
424
+ [7, 2],
425
+ [8, 3],
426
+ [9, 4],
427
+ ]
428
+ assert_array_equal(bin_mapper.transform(X), expected_X_trans)
429
+
430
+
431
+ def test_make_known_categories_bitsets():
432
+ # Check the output of make_known_categories_bitsets
433
+ X = np.array(
434
+ [[14, 2, 30], [30, 4, 70], [40, 10, 180], [40, 240, 180]], dtype=X_DTYPE
435
+ )
436
+
437
+ bin_mapper = _BinMapper(
438
+ n_bins=256,
439
+ is_categorical=np.array([False, True, True]),
440
+ known_categories=[None, X[:, 1], X[:, 2]],
441
+ )
442
+ bin_mapper.fit(X)
443
+
444
+ known_cat_bitsets, f_idx_map = bin_mapper.make_known_categories_bitsets()
445
+
446
+ # Note that for non-categorical features, values are left at 0
447
+ expected_f_idx_map = np.array([0, 0, 1], dtype=np.uint8)
448
+ assert_allclose(expected_f_idx_map, f_idx_map)
449
+
450
+ expected_cat_bitset = np.zeros((2, 8), dtype=np.uint32)
451
+
452
+ # first categorical feature: [2, 4, 10, 240]
453
+ f_idx = 1
454
+ mapped_f_idx = f_idx_map[f_idx]
455
+ expected_cat_bitset[mapped_f_idx, 0] = 2**2 + 2**4 + 2**10
456
+ # 240 = 7 * 32 + 16, therefore bit 16 of the uint32 word at index 7 is set.
457
+ expected_cat_bitset[mapped_f_idx, 7] = 2**16
458
+
459
+ # second categorical feature [30, 70, 180]
460
+ f_idx = 2
461
+ mapped_f_idx = f_idx_map[f_idx]
462
+ expected_cat_bitset[mapped_f_idx, 0] = 2**30
463
+ expected_cat_bitset[mapped_f_idx, 2] = 2**6
464
+ expected_cat_bitset[mapped_f_idx, 5] = 2**20
465
+
466
+ assert_allclose(expected_cat_bitset, known_cat_bitsets)
467
+
468
+
469
+ @pytest.mark.parametrize(
470
+ "is_categorical, known_categories, match",
471
+ [
472
+ (np.array([True]), [None], "Known categories for feature 0 must be provided"),
473
+ (
474
+ np.array([False]),
475
+ np.array([1, 2, 3]),
476
+ "isn't marked as a categorical feature, but categories were passed",
477
+ ),
478
+ ],
479
+ )
480
+ def test_categorical_parameters(is_categorical, known_categories, match):
481
+ # test the validation of the is_categorical and known_categories parameters
482
+
483
+ X = np.array([[1, 2, 3]], dtype=X_DTYPE)
484
+
485
+ bin_mapper = _BinMapper(
486
+ is_categorical=is_categorical, known_categories=known_categories
487
+ )
488
+ with pytest.raises(ValueError, match=match):
489
+ bin_mapper.fit(X)
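To make the `n_bins` / `max_bins` convention used throughout these tests explicit, here is a small standalone sketch (illustrative only, using the private `_BinMapper` helper exactly as the tests above do): `n_bins` counts one extra bin, reserved at the last index for missing values.

```python
import numpy as np
from sklearn.ensemble._hist_gradient_boosting.binning import _BinMapper

X = np.array([[1.0], [2.0], [3.0], [np.nan]])
max_bins = 3  # number of bins for non-missing values
mapper = _BinMapper(n_bins=max_bins + 1).fit(X)

print(mapper.n_bins_non_missing_)      # [3]
print(mapper.missing_values_bin_idx_)  # 3 == n_bins - 1
print(mapper.transform(X).ravel())     # [0 1 2 3]: nan goes to the last bin
```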
venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/test_bitset.py ADDED
@@ -0,0 +1,64 @@
1
+ import numpy as np
2
+ import pytest
3
+ from numpy.testing import assert_allclose
4
+
5
+ from sklearn.ensemble._hist_gradient_boosting._bitset import (
6
+ in_bitset_memoryview,
7
+ set_bitset_memoryview,
8
+ set_raw_bitset_from_binned_bitset,
9
+ )
10
+ from sklearn.ensemble._hist_gradient_boosting.common import X_DTYPE
11
+
12
+
13
+ @pytest.mark.parametrize(
14
+ "values_to_insert, expected_bitset",
15
+ [
16
+ ([0, 4, 33], np.array([2**0 + 2**4, 2**1, 0], dtype=np.uint32)),
17
+ (
18
+ [31, 32, 33, 79],
19
+ np.array([2**31, 2**0 + 2**1, 2**15], dtype=np.uint32),
20
+ ),
21
+ ],
22
+ )
23
+ def test_set_get_bitset(values_to_insert, expected_bitset):
24
+ n_32bits_ints = 3
25
+ bitset = np.zeros(n_32bits_ints, dtype=np.uint32)
26
+ for value in values_to_insert:
27
+ set_bitset_memoryview(bitset, value)
28
+ assert_allclose(expected_bitset, bitset)
29
+ for value in range(32 * n_32bits_ints):
30
+ if value in values_to_insert:
31
+ assert in_bitset_memoryview(bitset, value)
32
+ else:
33
+ assert not in_bitset_memoryview(bitset, value)
34
+
35
+
36
+ @pytest.mark.parametrize(
37
+ "raw_categories, binned_cat_to_insert, expected_raw_bitset",
38
+ [
39
+ (
40
+ [3, 4, 5, 10, 31, 32, 43],
41
+ [0, 2, 4, 5, 6],
42
+ [2**3 + 2**5 + 2**31, 2**0 + 2**11],
43
+ ),
44
+ ([3, 33, 50, 52], [1, 3], [0, 2**1 + 2**20]),
45
+ ],
46
+ )
47
+ def test_raw_bitset_from_binned_bitset(
48
+ raw_categories, binned_cat_to_insert, expected_raw_bitset
49
+ ):
50
+ binned_bitset = np.zeros(2, dtype=np.uint32)
51
+ raw_bitset = np.zeros(2, dtype=np.uint32)
52
+ raw_categories = np.asarray(raw_categories, dtype=X_DTYPE)
53
+
54
+ for val in binned_cat_to_insert:
55
+ set_bitset_memoryview(binned_bitset, val)
56
+
57
+ set_raw_bitset_from_binned_bitset(raw_bitset, binned_bitset, raw_categories)
58
+
59
+ assert_allclose(expected_raw_bitset, raw_bitset)
60
+ for binned_cat_val, raw_cat_val in enumerate(raw_categories):
61
+ if binned_cat_val in binned_cat_to_insert:
62
+ assert in_bitset_memoryview(raw_bitset, raw_cat_val)
63
+ else:
64
+ assert not in_bitset_memoryview(raw_bitset, raw_cat_val)
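As a plain-Python restatement of the layout these bitset tests assume, a category value `v` is stored in 32-bit word `v // 32` at bit `v % 32`. The sketch below mirrors the first parametrization of `test_set_get_bitset`; it is an illustration, not the Cython helpers themselves.

```python
import numpy as np


def set_bit(bitset, value):
    # word index and bit position within that word
    word, bit = divmod(value, 32)
    bitset[word] |= 1 << bit


def in_bitset(bitset, value):
    word, bit = divmod(value, 32)
    return bool(bitset[word] & (1 << bit))


bitset = np.zeros(3, dtype=np.uint32)
for value in (0, 4, 33):
    set_bit(bitset, value)

print(bitset)  # [17  2  0] == [2**0 + 2**4, 2**1, 0]
print(in_bitset(bitset, 33), in_bitset(bitset, 34))  # True False
```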
venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/test_compare_lightgbm.py ADDED
@@ -0,0 +1,279 @@
1
+ import numpy as np
2
+ import pytest
3
+
4
+ from sklearn.datasets import make_classification, make_regression
5
+ from sklearn.ensemble import (
6
+ HistGradientBoostingClassifier,
7
+ HistGradientBoostingRegressor,
8
+ )
9
+ from sklearn.ensemble._hist_gradient_boosting.binning import _BinMapper
10
+ from sklearn.ensemble._hist_gradient_boosting.utils import get_equivalent_estimator
11
+ from sklearn.metrics import accuracy_score
12
+ from sklearn.model_selection import train_test_split
13
+
14
+
15
+ @pytest.mark.parametrize("seed", range(5))
16
+ @pytest.mark.parametrize(
17
+ "loss",
18
+ [
19
+ "squared_error",
20
+ "poisson",
21
+ pytest.param(
22
+ "gamma",
23
+ marks=pytest.mark.skip("LightGBM with gamma loss has larger deviation."),
24
+ ),
25
+ ],
26
+ )
27
+ @pytest.mark.parametrize("min_samples_leaf", (1, 20))
28
+ @pytest.mark.parametrize(
29
+ "n_samples, max_leaf_nodes",
30
+ [
31
+ (255, 4096),
32
+ (1000, 8),
33
+ ],
34
+ )
35
+ def test_same_predictions_regression(
36
+ seed, loss, min_samples_leaf, n_samples, max_leaf_nodes
37
+ ):
38
+ # Make sure sklearn has the same predictions as lightgbm for easy targets.
39
+ #
40
+ # In particular when the size of the trees are bound and the number of
41
+ # samples is large enough, the structure of the prediction trees found by
42
+ # LightGBM and sklearn should be exactly identical.
43
+ #
44
+ # Notes:
45
+ # - Several candidate splits may have equal gains when the number of
46
+ # samples in a node is low (and because of float errors). Therefore the
47
+ # predictions on the test set might differ if the structure of the tree
48
+ # is not exactly the same. To avoid this issue we only compare the
49
+ # predictions on the test set when the number of samples is large enough
50
+ # and max_leaf_nodes is low enough.
51
+ # - To ignore discrepancies caused by small differences in the binning
52
+ # strategy, data is pre-binned if n_samples > 255.
53
+ # - We don't check the absolute_error loss here. This is because
54
+ # LightGBM's computation of the median (used for the initial value of
55
+ # raw_prediction) is a bit off (they'll e.g. return midpoints when there
56
+ # is no need to.). Since these tests only run 1 iteration, the
57
+ # discrepancy between the initial values leads to biggish differences in
58
+ # the predictions. These differences are much smaller with more
59
+ # iterations.
60
+ pytest.importorskip("lightgbm")
61
+
62
+ rng = np.random.RandomState(seed=seed)
63
+ max_iter = 1
64
+ max_bins = 255
65
+
66
+ X, y = make_regression(
67
+ n_samples=n_samples, n_features=5, n_informative=5, random_state=0
68
+ )
69
+
70
+ if loss in ("gamma", "poisson"):
71
+ # make the target positive
72
+ y = np.abs(y) + np.mean(np.abs(y))
73
+
74
+ if n_samples > 255:
75
+ # bin data and convert it to float32 so that the estimator doesn't
76
+ # treat it as pre-binned
77
+ X = _BinMapper(n_bins=max_bins + 1).fit_transform(X).astype(np.float32)
78
+
79
+ X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=rng)
80
+
81
+ est_sklearn = HistGradientBoostingRegressor(
82
+ loss=loss,
83
+ max_iter=max_iter,
84
+ max_bins=max_bins,
85
+ learning_rate=1,
86
+ early_stopping=False,
87
+ min_samples_leaf=min_samples_leaf,
88
+ max_leaf_nodes=max_leaf_nodes,
89
+ )
90
+ est_lightgbm = get_equivalent_estimator(est_sklearn, lib="lightgbm")
91
+ est_lightgbm.set_params(min_sum_hessian_in_leaf=0)
92
+
93
+ est_lightgbm.fit(X_train, y_train)
94
+ est_sklearn.fit(X_train, y_train)
95
+
96
+ # We need X to be treated an numerical data, not pre-binned data.
97
+ X_train, X_test = X_train.astype(np.float32), X_test.astype(np.float32)
98
+
99
+ pred_lightgbm = est_lightgbm.predict(X_train)
100
+ pred_sklearn = est_sklearn.predict(X_train)
101
+ if loss in ("gamma", "poisson"):
102
+ # More than 65% of the predictions must be close up to the 2nd decimal.
103
+ # TODO: We are not entirely satisfied with this lax comparison, but the root
104
+ # cause is not clear, maybe algorithmic differences. One such example is the
105
+ # poisson_max_delta_step parameter of LightGBM which does not exist in HGBT.
106
+ assert (
107
+ np.mean(np.isclose(pred_lightgbm, pred_sklearn, rtol=1e-2, atol=1e-2))
108
+ > 0.65
109
+ )
110
+ else:
111
+ # Less than 1% of the predictions may deviate more than 1e-3 in relative terms.
112
+ assert np.mean(np.isclose(pred_lightgbm, pred_sklearn, rtol=1e-3)) > 1 - 0.01
113
+
114
+ if max_leaf_nodes < 10 and n_samples >= 1000 and loss in ("squared_error",):
115
+ pred_lightgbm = est_lightgbm.predict(X_test)
116
+ pred_sklearn = est_sklearn.predict(X_test)
117
+ # Less than 1% of the predictions may deviate more than 1e-4 in relative terms.
118
+ assert np.mean(np.isclose(pred_lightgbm, pred_sklearn, rtol=1e-4)) > 1 - 0.01
119
+
120
+
121
+ @pytest.mark.parametrize("seed", range(5))
122
+ @pytest.mark.parametrize("min_samples_leaf", (1, 20))
123
+ @pytest.mark.parametrize(
124
+ "n_samples, max_leaf_nodes",
125
+ [
126
+ (255, 4096),
127
+ (1000, 8),
128
+ ],
129
+ )
130
+ def test_same_predictions_classification(
131
+ seed, min_samples_leaf, n_samples, max_leaf_nodes
132
+ ):
133
+ # Same as test_same_predictions_regression but for classification
134
+ pytest.importorskip("lightgbm")
135
+
136
+ rng = np.random.RandomState(seed=seed)
137
+ max_iter = 1
138
+ n_classes = 2
139
+ max_bins = 255
140
+
141
+ X, y = make_classification(
142
+ n_samples=n_samples,
143
+ n_classes=n_classes,
144
+ n_features=5,
145
+ n_informative=5,
146
+ n_redundant=0,
147
+ random_state=0,
148
+ )
149
+
150
+ if n_samples > 255:
151
+ # bin data and convert it to float32 so that the estimator doesn't
152
+ # treat it as pre-binned
153
+ X = _BinMapper(n_bins=max_bins + 1).fit_transform(X).astype(np.float32)
154
+
155
+ X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=rng)
156
+
157
+ est_sklearn = HistGradientBoostingClassifier(
158
+ loss="log_loss",
159
+ max_iter=max_iter,
160
+ max_bins=max_bins,
161
+ learning_rate=1,
162
+ early_stopping=False,
163
+ min_samples_leaf=min_samples_leaf,
164
+ max_leaf_nodes=max_leaf_nodes,
165
+ )
166
+ est_lightgbm = get_equivalent_estimator(
167
+ est_sklearn, lib="lightgbm", n_classes=n_classes
168
+ )
169
+
170
+ est_lightgbm.fit(X_train, y_train)
171
+ est_sklearn.fit(X_train, y_train)
172
+
173
+ # We need X to be treated an numerical data, not pre-binned data.
174
+ X_train, X_test = X_train.astype(np.float32), X_test.astype(np.float32)
175
+
176
+ pred_lightgbm = est_lightgbm.predict(X_train)
177
+ pred_sklearn = est_sklearn.predict(X_train)
178
+ assert np.mean(pred_sklearn == pred_lightgbm) > 0.89
179
+
180
+ acc_lightgbm = accuracy_score(y_train, pred_lightgbm)
181
+ acc_sklearn = accuracy_score(y_train, pred_sklearn)
182
+ np.testing.assert_almost_equal(acc_lightgbm, acc_sklearn)
183
+
184
+ if max_leaf_nodes < 10 and n_samples >= 1000:
185
+ pred_lightgbm = est_lightgbm.predict(X_test)
186
+ pred_sklearn = est_sklearn.predict(X_test)
187
+ assert np.mean(pred_sklearn == pred_lightgbm) > 0.89
188
+
189
+ acc_lightgbm = accuracy_score(y_test, pred_lightgbm)
190
+ acc_sklearn = accuracy_score(y_test, pred_sklearn)
191
+ np.testing.assert_almost_equal(acc_lightgbm, acc_sklearn, decimal=2)
192
+
193
+
194
+ @pytest.mark.parametrize("seed", range(5))
195
+ @pytest.mark.parametrize("min_samples_leaf", (1, 20))
196
+ @pytest.mark.parametrize(
197
+ "n_samples, max_leaf_nodes",
198
+ [
199
+ (255, 4096),
200
+ (10000, 8),
201
+ ],
202
+ )
203
+ def test_same_predictions_multiclass_classification(
204
+ seed, min_samples_leaf, n_samples, max_leaf_nodes
205
+ ):
206
+ # Same as test_same_predictions_regression but for classification
207
+ pytest.importorskip("lightgbm")
208
+
209
+ rng = np.random.RandomState(seed=seed)
210
+ n_classes = 3
211
+ max_iter = 1
212
+ max_bins = 255
213
+ lr = 1
214
+
215
+ X, y = make_classification(
216
+ n_samples=n_samples,
217
+ n_classes=n_classes,
218
+ n_features=5,
219
+ n_informative=5,
220
+ n_redundant=0,
221
+ n_clusters_per_class=1,
222
+ random_state=0,
223
+ )
224
+
225
+ if n_samples > 255:
226
+ # bin data and convert it to float32 so that the estimator doesn't
227
+ # treat it as pre-binned
228
+ X = _BinMapper(n_bins=max_bins + 1).fit_transform(X).astype(np.float32)
229
+
230
+ X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=rng)
231
+
232
+ est_sklearn = HistGradientBoostingClassifier(
233
+ loss="log_loss",
234
+ max_iter=max_iter,
235
+ max_bins=max_bins,
236
+ learning_rate=lr,
237
+ early_stopping=False,
238
+ min_samples_leaf=min_samples_leaf,
239
+ max_leaf_nodes=max_leaf_nodes,
240
+ )
241
+ est_lightgbm = get_equivalent_estimator(
242
+ est_sklearn, lib="lightgbm", n_classes=n_classes
243
+ )
244
+
245
+ est_lightgbm.fit(X_train, y_train)
246
+ est_sklearn.fit(X_train, y_train)
247
+
248
+ # We need X to be treated an numerical data, not pre-binned data.
249
+ X_train, X_test = X_train.astype(np.float32), X_test.astype(np.float32)
250
+
251
+ pred_lightgbm = est_lightgbm.predict(X_train)
252
+ pred_sklearn = est_sklearn.predict(X_train)
253
+ assert np.mean(pred_sklearn == pred_lightgbm) > 0.89
254
+
255
+ proba_lightgbm = est_lightgbm.predict_proba(X_train)
256
+ proba_sklearn = est_sklearn.predict_proba(X_train)
257
+ # assert more than 75% of the predicted probabilities are the same up to
258
+ # the second decimal
259
+ assert np.mean(np.abs(proba_lightgbm - proba_sklearn) < 1e-2) > 0.75
260
+
261
+ acc_lightgbm = accuracy_score(y_train, pred_lightgbm)
262
+ acc_sklearn = accuracy_score(y_train, pred_sklearn)
263
+
264
+ np.testing.assert_allclose(acc_lightgbm, acc_sklearn, rtol=0, atol=5e-2)
265
+
266
+ if max_leaf_nodes < 10 and n_samples >= 1000:
267
+ pred_lightgbm = est_lightgbm.predict(X_test)
268
+ pred_sklearn = est_sklearn.predict(X_test)
269
+ assert np.mean(pred_sklearn == pred_lightgbm) > 0.89
270
+
271
+ proba_lightgbm = est_lightgbm.predict_proba(X_train)
272
+ proba_sklearn = est_sklearn.predict_proba(X_train)
273
+ # assert more than 75% of the predicted probabilities are the same up
274
+ # to the second decimal
275
+ assert np.mean(np.abs(proba_lightgbm - proba_sklearn) < 1e-2) > 0.75
276
+
277
+ acc_lightgbm = accuracy_score(y_test, pred_lightgbm)
278
+ acc_sklearn = accuracy_score(y_test, pred_sklearn)
279
+ np.testing.assert_almost_equal(acc_lightgbm, acc_sklearn, decimal=2)
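The pre-binning trick used repeatedly above deserves a standalone note: binning the data once with `_BinMapper` and casting to float32 makes both libraries see identical integer-valued features, so differences in their binning strategies cannot cause prediction discrepancies. The sketch below shows only the sklearn side of that step, with illustrative parameters.

```python
import numpy as np
from sklearn.datasets import make_regression
from sklearn.ensemble import HistGradientBoostingRegressor
from sklearn.ensemble._hist_gradient_boosting.binning import _BinMapper

max_bins = 255
X, y = make_regression(n_samples=1000, n_features=5, n_informative=5, random_state=0)

# Pre-bin, then cast to float32 so the estimator treats the values as
# ordinary numerical features rather than as already-binned data.
X_binned = _BinMapper(n_bins=max_bins + 1).fit_transform(X).astype(np.float32)

est = HistGradientBoostingRegressor(
    max_iter=1, max_bins=max_bins, early_stopping=False, random_state=0
).fit(X_binned, y)
print(est.predict(X_binned[:3]))
```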
venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/test_gradient_boosting.py ADDED
@@ -0,0 +1,1683 @@
1
+ import copyreg
2
+ import io
3
+ import pickle
4
+ import re
5
+ import warnings
6
+ from unittest.mock import Mock
7
+
8
+ import joblib
9
+ import numpy as np
10
+ import pytest
11
+ from joblib.numpy_pickle import NumpyPickler
12
+ from numpy.testing import assert_allclose, assert_array_equal
13
+
14
+ import sklearn
15
+ from sklearn._loss.loss import (
16
+ AbsoluteError,
17
+ HalfBinomialLoss,
18
+ HalfSquaredError,
19
+ PinballLoss,
20
+ )
21
+ from sklearn.base import BaseEstimator, TransformerMixin, clone, is_regressor
22
+ from sklearn.compose import make_column_transformer
23
+ from sklearn.datasets import make_classification, make_low_rank_matrix, make_regression
24
+ from sklearn.dummy import DummyRegressor
25
+ from sklearn.ensemble import (
26
+ HistGradientBoostingClassifier,
27
+ HistGradientBoostingRegressor,
28
+ )
29
+ from sklearn.ensemble._hist_gradient_boosting.binning import _BinMapper
30
+ from sklearn.ensemble._hist_gradient_boosting.common import G_H_DTYPE
31
+ from sklearn.ensemble._hist_gradient_boosting.grower import TreeGrower
32
+ from sklearn.ensemble._hist_gradient_boosting.predictor import TreePredictor
33
+ from sklearn.exceptions import NotFittedError
34
+ from sklearn.metrics import get_scorer, mean_gamma_deviance, mean_poisson_deviance
35
+ from sklearn.model_selection import cross_val_score, train_test_split
36
+ from sklearn.pipeline import make_pipeline
37
+ from sklearn.preprocessing import KBinsDiscretizer, MinMaxScaler, OneHotEncoder
38
+ from sklearn.utils import _IS_32BIT, shuffle
39
+ from sklearn.utils._openmp_helpers import _openmp_effective_n_threads
40
+ from sklearn.utils._testing import _convert_container
41
+
42
+ n_threads = _openmp_effective_n_threads()
43
+
44
+ X_classification, y_classification = make_classification(random_state=0)
45
+ X_regression, y_regression = make_regression(random_state=0)
46
+ X_multi_classification, y_multi_classification = make_classification(
47
+ n_classes=3, n_informative=3, random_state=0
48
+ )
49
+
50
+
51
+ def _make_dumb_dataset(n_samples):
52
+ """Make a dumb dataset to test early stopping."""
53
+ rng = np.random.RandomState(42)
54
+ X_dumb = rng.randn(n_samples, 1)
55
+ y_dumb = (X_dumb[:, 0] > 0).astype("int64")
56
+ return X_dumb, y_dumb
57
+
58
+
59
+ @pytest.mark.parametrize(
60
+ "GradientBoosting, X, y",
61
+ [
62
+ (HistGradientBoostingClassifier, X_classification, y_classification),
63
+ (HistGradientBoostingRegressor, X_regression, y_regression),
64
+ ],
65
+ )
66
+ @pytest.mark.parametrize(
67
+ "params, err_msg",
68
+ [
69
+ (
70
+ {"interaction_cst": [0, 1]},
71
+ "Interaction constraints must be a sequence of tuples or lists",
72
+ ),
73
+ (
74
+ {"interaction_cst": [{0, 9999}]},
75
+ r"Interaction constraints must consist of integer indices in \[0,"
76
+ r" n_features - 1\] = \[.*\], specifying the position of features,",
77
+ ),
78
+ (
79
+ {"interaction_cst": [{-1, 0}]},
80
+ r"Interaction constraints must consist of integer indices in \[0,"
81
+ r" n_features - 1\] = \[.*\], specifying the position of features,",
82
+ ),
83
+ (
84
+ {"interaction_cst": [{0.5}]},
85
+ r"Interaction constraints must consist of integer indices in \[0,"
86
+ r" n_features - 1\] = \[.*\], specifying the position of features,",
87
+ ),
88
+ ],
89
+ )
90
+ def test_init_parameters_validation(GradientBoosting, X, y, params, err_msg):
91
+ with pytest.raises(ValueError, match=err_msg):
92
+ GradientBoosting(**params).fit(X, y)
93
+
94
+
95
+ @pytest.mark.parametrize(
96
+ "scoring, validation_fraction, early_stopping, n_iter_no_change, tol",
97
+ [
98
+ ("neg_mean_squared_error", 0.1, True, 5, 1e-7), # use scorer
99
+ ("neg_mean_squared_error", None, True, 5, 1e-1), # use scorer on train
100
+ (None, 0.1, True, 5, 1e-7), # same with default scorer
101
+ (None, None, True, 5, 1e-1),
102
+ ("loss", 0.1, True, 5, 1e-7), # use loss
103
+ ("loss", None, True, 5, 1e-1), # use loss on training data
104
+ (None, None, False, 5, 0.0), # no early stopping
105
+ ],
106
+ )
107
+ def test_early_stopping_regression(
108
+ scoring, validation_fraction, early_stopping, n_iter_no_change, tol
109
+ ):
110
+ max_iter = 200
111
+
112
+ X, y = make_regression(n_samples=50, random_state=0)
113
+
114
+ gb = HistGradientBoostingRegressor(
115
+ verbose=1, # just for coverage
116
+ min_samples_leaf=5, # easier to overfit fast
117
+ scoring=scoring,
118
+ tol=tol,
119
+ early_stopping=early_stopping,
120
+ validation_fraction=validation_fraction,
121
+ max_iter=max_iter,
122
+ n_iter_no_change=n_iter_no_change,
123
+ random_state=0,
124
+ )
125
+ gb.fit(X, y)
126
+
127
+ if early_stopping:
128
+ assert n_iter_no_change <= gb.n_iter_ < max_iter
129
+ else:
130
+ assert gb.n_iter_ == max_iter
131
+
132
+
133
+ @pytest.mark.parametrize(
134
+ "data",
135
+ (
136
+ make_classification(n_samples=30, random_state=0),
137
+ make_classification(
138
+ n_samples=30, n_classes=3, n_clusters_per_class=1, random_state=0
139
+ ),
140
+ ),
141
+ )
142
+ @pytest.mark.parametrize(
143
+ "scoring, validation_fraction, early_stopping, n_iter_no_change, tol",
144
+ [
145
+ ("accuracy", 0.1, True, 5, 1e-7), # use scorer
146
+ ("accuracy", None, True, 5, 1e-1), # use scorer on training data
147
+ (None, 0.1, True, 5, 1e-7), # same with default scorer
148
+ (None, None, True, 5, 1e-1),
149
+ ("loss", 0.1, True, 5, 1e-7), # use loss
150
+ ("loss", None, True, 5, 1e-1), # use loss on training data
151
+ (None, None, False, 5, 0.0), # no early stopping
152
+ ],
153
+ )
154
+ def test_early_stopping_classification(
155
+ data, scoring, validation_fraction, early_stopping, n_iter_no_change, tol
156
+ ):
157
+ max_iter = 50
158
+
159
+ X, y = data
160
+
161
+ gb = HistGradientBoostingClassifier(
162
+ verbose=1, # just for coverage
163
+ min_samples_leaf=5, # easier to overfit fast
164
+ scoring=scoring,
165
+ tol=tol,
166
+ early_stopping=early_stopping,
167
+ validation_fraction=validation_fraction,
168
+ max_iter=max_iter,
169
+ n_iter_no_change=n_iter_no_change,
170
+ random_state=0,
171
+ )
172
+ gb.fit(X, y)
173
+
174
+ if early_stopping is True:
175
+ assert n_iter_no_change <= gb.n_iter_ < max_iter
176
+ else:
177
+ assert gb.n_iter_ == max_iter
178
+
179
+
180
+ @pytest.mark.parametrize(
181
+ "GradientBoosting, X, y",
182
+ [
183
+ (HistGradientBoostingClassifier, *_make_dumb_dataset(10000)),
184
+ (HistGradientBoostingClassifier, *_make_dumb_dataset(10001)),
185
+ (HistGradientBoostingRegressor, *_make_dumb_dataset(10000)),
186
+ (HistGradientBoostingRegressor, *_make_dumb_dataset(10001)),
187
+ ],
188
+ )
189
+ def test_early_stopping_default(GradientBoosting, X, y):
190
+ # Test that early stopping is enabled by default if and only if there
191
+ # are more than 10000 samples
192
+ gb = GradientBoosting(max_iter=10, n_iter_no_change=2, tol=1e-1)
193
+ gb.fit(X, y)
194
+ if X.shape[0] > 10000:
195
+ assert gb.n_iter_ < gb.max_iter
196
+ else:
197
+ assert gb.n_iter_ == gb.max_iter
198
+
199
+
200
+ @pytest.mark.parametrize(
201
+ "scores, n_iter_no_change, tol, stopping",
202
+ [
203
+ ([], 1, 0.001, False), # not enough iterations
204
+ ([1, 1, 1], 5, 0.001, False), # not enough iterations
205
+ ([1, 1, 1, 1, 1], 5, 0.001, False), # not enough iterations
206
+ ([1, 2, 3, 4, 5, 6], 5, 0.001, False), # significant improvement
207
+ ([1, 2, 3, 4, 5, 6], 5, 0.0, False), # significant improvement
208
+ ([1, 2, 3, 4, 5, 6], 5, 0.999, False), # significant improvement
209
+ ([1, 2, 3, 4, 5, 6], 5, 5 - 1e-5, False), # significant improvement
210
+ ([1] * 6, 5, 0.0, True), # no significant improvement
211
+ ([1] * 6, 5, 0.001, True), # no significant improvement
212
+ ([1] * 6, 5, 5, True), # no significant improvement
213
+ ],
214
+ )
215
+ def test_should_stop(scores, n_iter_no_change, tol, stopping):
216
+ gbdt = HistGradientBoostingClassifier(n_iter_no_change=n_iter_no_change, tol=tol)
217
+ assert gbdt._should_stop(scores) == stopping
218
+
219
+
220
+ def test_absolute_error():
221
+ # For coverage only.
222
+ X, y = make_regression(n_samples=500, random_state=0)
223
+ gbdt = HistGradientBoostingRegressor(loss="absolute_error", random_state=0)
224
+ gbdt.fit(X, y)
225
+ assert gbdt.score(X, y) > 0.9
226
+
227
+
228
+ def test_absolute_error_sample_weight():
229
+ # non regression test for issue #19400
230
+ # make sure no error is thrown during fit of
231
+ # HistGradientBoostingRegressor with absolute_error loss function
232
+ # and passing sample_weight
233
+ rng = np.random.RandomState(0)
234
+ n_samples = 100
235
+ X = rng.uniform(-1, 1, size=(n_samples, 2))
236
+ y = rng.uniform(-1, 1, size=n_samples)
237
+ sample_weight = rng.uniform(0, 1, size=n_samples)
238
+ gbdt = HistGradientBoostingRegressor(loss="absolute_error")
239
+ gbdt.fit(X, y, sample_weight=sample_weight)
240
+
241
+
242
+ @pytest.mark.parametrize("y", [([1.0, -2.0, 0.0]), ([0.0, 1.0, 2.0])])
243
+ def test_gamma_y_positive(y):
244
+ # Test that ValueError is raised if any y_i <= 0.
245
+ err_msg = r"loss='gamma' requires strictly positive y."
246
+ gbdt = HistGradientBoostingRegressor(loss="gamma", random_state=0)
247
+ with pytest.raises(ValueError, match=err_msg):
248
+ gbdt.fit(np.zeros(shape=(len(y), 1)), y)
249
+
250
+
251
+ def test_gamma():
252
+ # For a Gamma distributed target, we expect an HGBT trained with the Gamma deviance
253
+ # (loss) to give better results than an HGBT with any other loss function, measured
254
+ # in out-of-sample Gamma deviance as metric/score.
255
+ # Note that squared error could potentially predict negative values which is
256
+ # invalid (np.inf) for the Gamma deviance. A Poisson HGBT (having a log link)
257
+ # does not have that defect.
258
+ # Important note: It seems that a Poisson HGBT almost always has better
259
+ # out-of-sample performance than the Gamma HGBT, measured in Gamma deviance.
260
+ # LightGBM shows the same behaviour. Hence, we only compare to a squared error
261
+ # HGBT, but not to a Poisson deviance HGBT.
262
+ rng = np.random.RandomState(42)
263
+ n_train, n_test, n_features = 500, 100, 20
264
+ X = make_low_rank_matrix(
265
+ n_samples=n_train + n_test,
266
+ n_features=n_features,
267
+ random_state=rng,
268
+ )
269
+ # We create a log-linear Gamma model. This gives y.min ~ 1e-2, y.max ~ 1e2
270
+ coef = rng.uniform(low=-10, high=20, size=n_features)
271
+ # Numpy parametrizes gamma(shape=k, scale=theta) with mean = k * theta and
272
+ # variance = k * theta^2. We parametrize it instead with mean = exp(X @ coef)
273
+ # and variance = dispersion * mean^2 by setting k = 1 / dispersion,
274
+ # theta = dispersion * mean.
275
+ dispersion = 0.5
276
+ y = rng.gamma(shape=1 / dispersion, scale=dispersion * np.exp(X @ coef))
277
+ X_train, X_test, y_train, y_test = train_test_split(
278
+ X, y, test_size=n_test, random_state=rng
279
+ )
280
+ gbdt_gamma = HistGradientBoostingRegressor(loss="gamma", random_state=123)
281
+ gbdt_mse = HistGradientBoostingRegressor(loss="squared_error", random_state=123)
282
+ dummy = DummyRegressor(strategy="mean")
283
+ for model in (gbdt_gamma, gbdt_mse, dummy):
284
+ model.fit(X_train, y_train)
285
+
286
+ for X, y in [(X_train, y_train), (X_test, y_test)]:
287
+ loss_gbdt_gamma = mean_gamma_deviance(y, gbdt_gamma.predict(X))
288
+ # We restrict the squared error HGBT to predict at least the minimum seen y at
289
+ # train time to make it strictly positive.
290
+ loss_gbdt_mse = mean_gamma_deviance(
291
+ y, np.maximum(np.min(y_train), gbdt_mse.predict(X))
292
+ )
293
+ loss_dummy = mean_gamma_deviance(y, dummy.predict(X))
294
+ assert loss_gbdt_gamma < loss_dummy
295
+ assert loss_gbdt_gamma < loss_gbdt_mse
296
+
297
+
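As a quick sanity check of the parametrization above: with shape k = 1/dispersion and scale theta = dispersion * mean, a Gamma sample has E[Y] = k * theta = mean and Var[Y] = k * theta**2 = dispersion * mean**2. A minimal NumPy sketch with illustrative values (not taken from the test):

    import numpy as np

    rng = np.random.RandomState(0)
    dispersion, mean = 0.5, 3.0                   # illustrative values
    k, theta = 1 / dispersion, dispersion * mean  # shape k, scale theta

    samples = rng.gamma(shape=k, scale=theta, size=1_000_000)
    # Empirically, the mean is ~ k * theta = 3.0 and the variance is
    # ~ k * theta**2 = dispersion * mean**2 = 4.5.
    print(samples.mean(), k * theta)
    print(samples.var(), dispersion * mean**2)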
298
+ @pytest.mark.parametrize("quantile", [0.2, 0.5, 0.8])
299
+ def test_quantile_asymmetric_error(quantile):
300
+ """Test quantile regression for asymmetric distributed targets."""
301
+ n_samples = 10_000
302
+ rng = np.random.RandomState(42)
303
+ # take care that X @ coef + intercept > 0
304
+ X = np.concatenate(
305
+ (
306
+ np.abs(rng.randn(n_samples)[:, None]),
307
+ -rng.randint(2, size=(n_samples, 1)),
308
+ ),
309
+ axis=1,
310
+ )
311
+ intercept = 1.23
312
+ coef = np.array([0.5, -2])
313
+ # For an exponential distribution with rate lambda, e.g. exp(-lambda * x),
314
+ # the quantile at level q is:
315
+ # quantile(q) = - log(1 - q) / lambda
316
+ # scale = 1/lambda = -quantile(q) / log(1-q)
317
+ y = rng.exponential(
318
+ scale=-(X @ coef + intercept) / np.log(1 - quantile), size=n_samples
319
+ )
320
+ model = HistGradientBoostingRegressor(
321
+ loss="quantile",
322
+ quantile=quantile,
323
+ max_iter=25,
324
+ random_state=0,
325
+ max_leaf_nodes=10,
326
+ ).fit(X, y)
327
+ assert_allclose(np.mean(model.predict(X) > y), quantile, rtol=1e-2)
328
+
329
+ pinball_loss = PinballLoss(quantile=quantile)
330
+ loss_true_quantile = pinball_loss(y, X @ coef + intercept)
331
+ loss_pred_quantile = pinball_loss(y, model.predict(X))
332
+ # we are overfitting
333
+ assert loss_pred_quantile <= loss_true_quantile
334
+
335
+
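The scale used above follows from inverting the exponential CDF F(x) = 1 - exp(-lambda * x): quantile(q) = -log(1 - q) / lambda, hence scale = 1/lambda = -quantile(q) / log(1 - q). A minimal NumPy sketch with illustrative values (not taken from the test):

    import numpy as np

    rng = np.random.RandomState(0)
    q, target_quantile = 0.8, 2.0             # illustrative values
    scale = -target_quantile / np.log(1 - q)  # scale = 1 / lambda
    samples = rng.exponential(scale=scale, size=1_000_000)
    # The empirical q-quantile should be close to the requested one (2.0).
    print(np.quantile(samples, q), target_quantile)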
336
+ @pytest.mark.parametrize("y", [([1.0, -2.0, 0.0]), ([0.0, 0.0, 0.0])])
337
+ def test_poisson_y_positive(y):
338
+ # Test that ValueError is raised if either one y_i < 0 or sum(y_i) <= 0.
339
+ err_msg = r"loss='poisson' requires non-negative y and sum\(y\) > 0."
340
+ gbdt = HistGradientBoostingRegressor(loss="poisson", random_state=0)
341
+ with pytest.raises(ValueError, match=err_msg):
342
+ gbdt.fit(np.zeros(shape=(len(y), 1)), y)
343
+
344
+
345
+ def test_poisson():
346
+ # For Poisson distributed target, Poisson loss should give better results
347
+ # than least squares measured in Poisson deviance as metric.
348
+ rng = np.random.RandomState(42)
349
+ n_train, n_test, n_features = 500, 100, 100
350
+ X = make_low_rank_matrix(
351
+ n_samples=n_train + n_test, n_features=n_features, random_state=rng
352
+ )
353
+ # We create a log-linear Poisson model and downscale coef as it will get
354
+ # exponentiated.
355
+ coef = rng.uniform(low=-2, high=2, size=n_features) / np.max(X, axis=0)
356
+ y = rng.poisson(lam=np.exp(X @ coef))
357
+ X_train, X_test, y_train, y_test = train_test_split(
358
+ X, y, test_size=n_test, random_state=rng
359
+ )
360
+ gbdt_pois = HistGradientBoostingRegressor(loss="poisson", random_state=rng)
361
+ gbdt_ls = HistGradientBoostingRegressor(loss="squared_error", random_state=rng)
362
+ gbdt_pois.fit(X_train, y_train)
363
+ gbdt_ls.fit(X_train, y_train)
364
+ dummy = DummyRegressor(strategy="mean").fit(X_train, y_train)
365
+
366
+ for X, y in [(X_train, y_train), (X_test, y_test)]:
367
+ metric_pois = mean_poisson_deviance(y, gbdt_pois.predict(X))
368
+ # squared_error might produce non-positive predictions => clip
369
+ metric_ls = mean_poisson_deviance(y, np.clip(gbdt_ls.predict(X), 1e-15, None))
370
+ metric_dummy = mean_poisson_deviance(y, dummy.predict(X))
371
+ assert metric_pois < metric_ls
372
+ assert metric_pois < metric_dummy
373
+
374
+
375
+ def test_binning_train_validation_are_separated():
376
+ # Make sure training and validation data are binned separately.
377
+ # See issue 13926
378
+
379
+ rng = np.random.RandomState(0)
380
+ validation_fraction = 0.2
381
+ gb = HistGradientBoostingClassifier(
382
+ early_stopping=True, validation_fraction=validation_fraction, random_state=rng
383
+ )
384
+ gb.fit(X_classification, y_classification)
385
+ mapper_training_data = gb._bin_mapper
386
+
387
+ # Note that since the data is small there is no subsampling and the
388
+ # random_state doesn't matter
389
+ mapper_whole_data = _BinMapper(random_state=0)
390
+ mapper_whole_data.fit(X_classification)
391
+
392
+ n_samples = X_classification.shape[0]
393
+ assert np.all(
394
+ mapper_training_data.n_bins_non_missing_
395
+ == int((1 - validation_fraction) * n_samples)
396
+ )
397
+ assert np.all(
398
+ mapper_training_data.n_bins_non_missing_
399
+ != mapper_whole_data.n_bins_non_missing_
400
+ )
401
+
402
+
403
+ def test_missing_values_trivial():
404
+ # sanity check for missing values support. With only one feature and
405
+ # y == isnan(X), the gbdt is supposed to reach perfect accuracy on the
406
+ # training set.
407
+
408
+ n_samples = 100
409
+ n_features = 1
410
+ rng = np.random.RandomState(0)
411
+
412
+ X = rng.normal(size=(n_samples, n_features))
413
+ mask = rng.binomial(1, 0.5, size=X.shape).astype(bool)
414
+ X[mask] = np.nan
415
+ y = mask.ravel()
416
+ gb = HistGradientBoostingClassifier()
417
+ gb.fit(X, y)
418
+
419
+ assert gb.score(X, y) == pytest.approx(1)
420
+
421
+
422
+ @pytest.mark.parametrize("problem", ("classification", "regression"))
423
+ @pytest.mark.parametrize(
424
+ (
425
+ "missing_proportion, expected_min_score_classification, "
426
+ "expected_min_score_regression"
427
+ ),
428
+ [(0.1, 0.97, 0.89), (0.2, 0.93, 0.81), (0.5, 0.79, 0.52)],
429
+ )
430
+ def test_missing_values_resilience(
431
+ problem,
432
+ missing_proportion,
433
+ expected_min_score_classification,
434
+ expected_min_score_regression,
435
+ ):
436
+ # Make sure the estimators can deal with missing values and still yield
437
+ # decent predictions
438
+
439
+ rng = np.random.RandomState(0)
440
+ n_samples = 1000
441
+ n_features = 2
442
+ if problem == "regression":
443
+ X, y = make_regression(
444
+ n_samples=n_samples,
445
+ n_features=n_features,
446
+ n_informative=n_features,
447
+ random_state=rng,
448
+ )
449
+ gb = HistGradientBoostingRegressor()
450
+ expected_min_score = expected_min_score_regression
451
+ else:
452
+ X, y = make_classification(
453
+ n_samples=n_samples,
454
+ n_features=n_features,
455
+ n_informative=n_features,
456
+ n_redundant=0,
457
+ n_repeated=0,
458
+ random_state=rng,
459
+ )
460
+ gb = HistGradientBoostingClassifier()
461
+ expected_min_score = expected_min_score_classification
462
+
463
+ mask = rng.binomial(1, missing_proportion, size=X.shape).astype(bool)
464
+ X[mask] = np.nan
465
+
466
+ gb.fit(X, y)
467
+
468
+ assert gb.score(X, y) > expected_min_score
469
+
470
+
471
+ @pytest.mark.parametrize(
472
+ "data",
473
+ [
474
+ make_classification(random_state=0, n_classes=2),
475
+ make_classification(random_state=0, n_classes=3, n_informative=3),
476
+ ],
477
+ ids=["binary_log_loss", "multiclass_log_loss"],
478
+ )
479
+ def test_zero_division_hessians(data):
480
+ # non regression test for issue #14018
481
+ # make sure we avoid zero division errors when computing the leaves values.
482
+
483
+ # If the learning rate is too high, the raw predictions are bad and will
484
+ # saturate the softmax (or sigmoid in binary classif). This leads to
485
+ # probabilities being exactly 0 or 1, gradients being constant, and
486
+ # hessians being zero.
487
+ X, y = data
488
+ gb = HistGradientBoostingClassifier(learning_rate=100, max_iter=10)
489
+ gb.fit(X, y)
490
+
491
+
492
+ def test_small_trainset():
493
+ # Make sure that the small trainset is stratified and has the expected
494
+ # length (10k samples)
495
+ n_samples = 20000
496
+ original_distrib = {0: 0.1, 1: 0.2, 2: 0.3, 3: 0.4}
497
+ rng = np.random.RandomState(42)
498
+ X = rng.randn(n_samples).reshape(n_samples, 1)
499
+ y = [
500
+ [class_] * int(prop * n_samples) for (class_, prop) in original_distrib.items()
501
+ ]
502
+ y = shuffle(np.concatenate(y))
503
+ gb = HistGradientBoostingClassifier()
504
+
505
+ # Compute the small training set
506
+ X_small, y_small, *_ = gb._get_small_trainset(
507
+ X, y, seed=42, sample_weight_train=None
508
+ )
509
+
510
+ # Compute the class distribution in the small training set
511
+ unique, counts = np.unique(y_small, return_counts=True)
512
+ small_distrib = {class_: count / 10000 for (class_, count) in zip(unique, counts)}
513
+
514
+ # Test that the small training set has the expected length
515
+ assert X_small.shape[0] == 10000
516
+ assert y_small.shape[0] == 10000
517
+
518
+ # Test that the class distributions in the whole dataset and in the small
519
+ # training set are identical
520
+ assert small_distrib == pytest.approx(original_distrib)
521
+
522
+
523
+ def test_missing_values_minmax_imputation():
524
+ # Compare the built-in missing value handling of Histogram GBC with an
525
+ # a-priori missing value imputation strategy that should yield the same
526
+ # results in terms of decision function.
527
+ #
528
+ # Each feature (containing NaNs) is replaced by 2 features:
529
+ # - one where the nans are replaced by min(feature) - 1
530
+ # - one where the nans are replaced by max(feature) + 1
531
+ # A split where nans go to the left has an equivalent split in the
532
+ # first (min) feature, and a split where nans go to the right has an
533
+ # equivalent split in the second (max) feature.
534
+ #
535
+ # Assuming the data is such that there is never a tie to select the best
536
+ # feature to split on during training, the learned decision trees should be
537
+ # strictly equivalent (learn a sequence of splits that encode the same
538
+ # decision function).
539
+ #
540
+ # The MinMaxImputer transformer is meant to be a toy implementation of the
541
+ # "Missing In Attributes" (MIA) missing value handling for decision trees
542
+ # https://www.sciencedirect.com/science/article/abs/pii/S0167865508000305
543
+ # The implementation of MIA as an imputation transformer was suggested by
544
+ # "Remark 3" in :arxiv:'<1902.06931>`
545
+
546
+ class MinMaxImputer(TransformerMixin, BaseEstimator):
547
+ def fit(self, X, y=None):
548
+ mm = MinMaxScaler().fit(X)
549
+ self.data_min_ = mm.data_min_
550
+ self.data_max_ = mm.data_max_
551
+ return self
552
+
553
+ def transform(self, X):
554
+ X_min, X_max = X.copy(), X.copy()
555
+
556
+ for feature_idx in range(X.shape[1]):
557
+ nan_mask = np.isnan(X[:, feature_idx])
558
+ X_min[nan_mask, feature_idx] = self.data_min_[feature_idx] - 1
559
+ X_max[nan_mask, feature_idx] = self.data_max_[feature_idx] + 1
560
+
561
+ return np.concatenate([X_min, X_max], axis=1)
562
+
563
+ def make_missing_value_data(n_samples=int(1e4), seed=0):
564
+ rng = np.random.RandomState(seed)
565
+ X, y = make_regression(n_samples=n_samples, n_features=4, random_state=rng)
566
+
567
+ # Pre-bin the data to ensure a deterministic handling by the 2
568
+ # strategies and also make it easier to insert np.nan in a structured
569
+ # way:
570
+ X = KBinsDiscretizer(n_bins=42, encode="ordinal").fit_transform(X)
571
+
572
+ # First feature has missing values completely at random:
573
+ rnd_mask = rng.rand(X.shape[0]) > 0.9
574
+ X[rnd_mask, 0] = np.nan
575
+
576
+ # Second and third features have missing values for extreme values
577
+ # (censoring missingness):
578
+ low_mask = X[:, 1] == 0
579
+ X[low_mask, 1] = np.nan
580
+
581
+ high_mask = X[:, 2] == X[:, 2].max()
582
+ X[high_mask, 2] = np.nan
583
+
584
+ # Make the last feature nan pattern very informative:
585
+ y_max = np.percentile(y, 70)
586
+ y_max_mask = y >= y_max
587
+ y[y_max_mask] = y_max
588
+ X[y_max_mask, 3] = np.nan
589
+
590
+ # Check that there is at least one missing value in each feature:
591
+ for feature_idx in range(X.shape[1]):
592
+ assert any(np.isnan(X[:, feature_idx]))
593
+
594
+ # Let's use a test set to check that the learned decision function is
595
+ # the same as evaluated on unseen data. Otherwise it could just be the
596
+ # case that we find two independent ways to overfit the training set.
597
+ return train_test_split(X, y, random_state=rng)
598
+
599
+ # n_samples needs to be large enough to minimize the likelihood of having
600
+ # several candidate splits with the same gain value in a given tree.
601
+ X_train, X_test, y_train, y_test = make_missing_value_data(
602
+ n_samples=int(1e4), seed=0
603
+ )
604
+
605
+ # Use a small number of leaf nodes and iterations so as to keep
606
+ # under-fitting models to minimize the likelihood of ties when training the
607
+ # model.
608
+ gbm1 = HistGradientBoostingRegressor(max_iter=100, max_leaf_nodes=5, random_state=0)
609
+ gbm1.fit(X_train, y_train)
610
+
611
+ gbm2 = make_pipeline(MinMaxImputer(), clone(gbm1))
612
+ gbm2.fit(X_train, y_train)
613
+
614
+ # Check that the model reach the same score:
615
+ assert gbm1.score(X_train, y_train) == pytest.approx(gbm2.score(X_train, y_train))
616
+
617
+ assert gbm1.score(X_test, y_test) == pytest.approx(gbm2.score(X_test, y_test))
618
+
619
+ # Check the individual prediction match as a finer grained
620
+ # decision function check.
621
+ assert_allclose(gbm1.predict(X_train), gbm2.predict(X_train))
622
+ assert_allclose(gbm1.predict(X_test), gbm2.predict(X_test))
623
+
624
+
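The equivalence exploited in test_missing_values_minmax_imputation can be seen on a single feature: imputing NaN with min - 1 makes every threshold split send the missing samples to the left child, while imputing with max + 1 sends them to the right. A toy NumPy sketch with illustrative values:

    import numpy as np

    x = np.array([1.0, 2.0, np.nan, 4.0])
    x_min = np.where(np.isnan(x), np.nanmin(x) - 1, x)  # NaN -> 0.0: splits send NaN left
    x_max = np.where(np.isnan(x), np.nanmax(x) + 1, x)  # NaN -> 5.0: splits send NaN right
    # A native split "x <= 2.5, NaNs go left" matches "x_min <= 2.5",
    # while "x <= 2.5, NaNs go right" matches "x_max <= 2.5".
    print(x_min <= 2.5)  # [ True  True  True False]
    print(x_max <= 2.5)  # [ True  True False False]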
625
+ def test_infinite_values():
626
+ # Basic test for infinite values
627
+
628
+ X = np.array([-np.inf, 0, 1, np.inf]).reshape(-1, 1)
629
+ y = np.array([0, 0, 1, 1])
630
+
631
+ gbdt = HistGradientBoostingRegressor(min_samples_leaf=1)
632
+ gbdt.fit(X, y)
633
+ np.testing.assert_allclose(gbdt.predict(X), y, atol=1e-4)
634
+
635
+
636
+ def test_consistent_lengths():
637
+ X = np.array([-np.inf, 0, 1, np.inf]).reshape(-1, 1)
638
+ y = np.array([0, 0, 1, 1])
639
+ sample_weight = np.array([0.1, 0.3, 0.1])
640
+ gbdt = HistGradientBoostingRegressor()
641
+ with pytest.raises(ValueError, match=r"sample_weight.shape == \(3,\), expected"):
642
+ gbdt.fit(X, y, sample_weight)
643
+
644
+ with pytest.raises(
645
+ ValueError, match="Found input variables with inconsistent number"
646
+ ):
647
+ gbdt.fit(X, y[1:])
648
+
649
+
650
+ def test_infinite_values_missing_values():
651
+ # High level test making sure that inf and nan values are properly handled
652
+ # when both are present. This is similar to
653
+ # test_split_on_nan_with_infinite_values() in test_grower.py, though we
654
+ # cannot check the predictions for binned values here.
655
+
656
+ X = np.asarray([-np.inf, 0, 1, np.inf, np.nan]).reshape(-1, 1)
657
+ y_isnan = np.isnan(X.ravel())
658
+ y_isinf = X.ravel() == np.inf
659
+
660
+ stump_clf = HistGradientBoostingClassifier(
661
+ min_samples_leaf=1, max_iter=1, learning_rate=1, max_depth=2
662
+ )
663
+
664
+ assert stump_clf.fit(X, y_isinf).score(X, y_isinf) == 1
665
+ assert stump_clf.fit(X, y_isnan).score(X, y_isnan) == 1
666
+
667
+
668
+ @pytest.mark.parametrize("scoring", [None, "loss"])
669
+ def test_string_target_early_stopping(scoring):
670
+ # Regression tests for #14709 where the targets need to be encoded before
671
+ # to compute the score
672
+ rng = np.random.RandomState(42)
673
+ X = rng.randn(100, 10)
674
+ y = np.array(["x"] * 50 + ["y"] * 50, dtype=object)
675
+ gbrt = HistGradientBoostingClassifier(n_iter_no_change=10, scoring=scoring)
676
+ gbrt.fit(X, y)
677
+
678
+
679
+ def test_zero_sample_weights_regression():
680
+ # Make sure setting a SW to zero amounts to ignoring the corresponding
681
+ # sample
682
+
683
+ X = [[1, 0], [1, 0], [1, 0], [0, 1]]
684
+ y = [0, 0, 1, 0]
685
+ # ignore the first 2 training samples by setting their weight to 0
686
+ sample_weight = [0, 0, 1, 1]
687
+ gb = HistGradientBoostingRegressor(min_samples_leaf=1)
688
+ gb.fit(X, y, sample_weight=sample_weight)
689
+ assert gb.predict([[1, 0]])[0] > 0.5
690
+
691
+
692
+ def test_zero_sample_weights_classification():
693
+ # Make sure setting a SW to zero amounts to ignoring the corresponding
694
+ # sample
695
+
696
+ X = [[1, 0], [1, 0], [1, 0], [0, 1]]
697
+ y = [0, 0, 1, 0]
698
+ # ignore the first 2 training samples by setting their weight to 0
699
+ sample_weight = [0, 0, 1, 1]
700
+ gb = HistGradientBoostingClassifier(loss="log_loss", min_samples_leaf=1)
701
+ gb.fit(X, y, sample_weight=sample_weight)
702
+ assert_array_equal(gb.predict([[1, 0]]), [1])
703
+
704
+ X = [[1, 0], [1, 0], [1, 0], [0, 1], [1, 1]]
705
+ y = [0, 0, 1, 0, 2]
706
+ # ignore the first 2 training samples by setting their weight to 0
707
+ sample_weight = [0, 0, 1, 1, 1]
708
+ gb = HistGradientBoostingClassifier(loss="log_loss", min_samples_leaf=1)
709
+ gb.fit(X, y, sample_weight=sample_weight)
710
+ assert_array_equal(gb.predict([[1, 0]]), [1])
711
+
712
+
713
+ @pytest.mark.parametrize(
714
+ "problem", ("regression", "binary_classification", "multiclass_classification")
715
+ )
716
+ @pytest.mark.parametrize("duplication", ("half", "all"))
717
+ def test_sample_weight_effect(problem, duplication):
718
+ # High level test to make sure that duplicating a sample is equivalent to
719
+ # giving it weight of 2.
720
+
721
+ # The test fails for n_samples > 255 because binning does not take sample weights
722
+ # into account. Keeping n_samples <= 255 makes
723
+ # sure only unique values are used so SW have no effect on binning.
724
+ n_samples = 255
725
+ n_features = 2
726
+ if problem == "regression":
727
+ X, y = make_regression(
728
+ n_samples=n_samples,
729
+ n_features=n_features,
730
+ n_informative=n_features,
731
+ random_state=0,
732
+ )
733
+ Klass = HistGradientBoostingRegressor
734
+ else:
735
+ n_classes = 2 if problem == "binary_classification" else 3
736
+ X, y = make_classification(
737
+ n_samples=n_samples,
738
+ n_features=n_features,
739
+ n_informative=n_features,
740
+ n_redundant=0,
741
+ n_clusters_per_class=1,
742
+ n_classes=n_classes,
743
+ random_state=0,
744
+ )
745
+ Klass = HistGradientBoostingClassifier
746
+
747
+ # This test can't pass if min_samples_leaf > 1 because that would force 2
748
+ # samples to be in the same node in est_sw, while these samples would be
749
+ # free to be separate in est_dup: est_dup would just group together the
750
+ # duplicated samples.
751
+ est = Klass(min_samples_leaf=1)
752
+
753
+ # Create dataset with duplicate and corresponding sample weights
754
+ if duplication == "half":
755
+ lim = n_samples // 2
756
+ else:
757
+ lim = n_samples
758
+ X_dup = np.r_[X, X[:lim]]
759
+ y_dup = np.r_[y, y[:lim]]
760
+ sample_weight = np.ones(shape=(n_samples))
761
+ sample_weight[:lim] = 2
762
+
763
+ est_sw = clone(est).fit(X, y, sample_weight=sample_weight)
764
+ est_dup = clone(est).fit(X_dup, y_dup)
765
+
766
+ # checking raw_predict is stricter than just predict for classification
767
+ assert np.allclose(est_sw._raw_predict(X_dup), est_dup._raw_predict(X_dup))
768
+
769
+
770
+ @pytest.mark.parametrize("Loss", (HalfSquaredError, AbsoluteError))
771
+ def test_sum_hessians_are_sample_weight(Loss):
772
+ # For losses with constant hessians, the sum_hessians field of the
773
+ # histograms must be equal to the sum of the sample weight of samples at
774
+ # the corresponding bin.
775
+
776
+ rng = np.random.RandomState(0)
777
+ n_samples = 1000
778
+ n_features = 2
779
+ X, y = make_regression(n_samples=n_samples, n_features=n_features, random_state=rng)
780
+ bin_mapper = _BinMapper()
781
+ X_binned = bin_mapper.fit_transform(X)
782
+
783
+ # While sample weights are supposed to be positive, this still works.
784
+ sample_weight = rng.normal(size=n_samples)
785
+
786
+ loss = Loss(sample_weight=sample_weight)
787
+ gradients, hessians = loss.init_gradient_and_hessian(
788
+ n_samples=n_samples, dtype=G_H_DTYPE
789
+ )
790
+ gradients, hessians = gradients.reshape((-1, 1)), hessians.reshape((-1, 1))
791
+ raw_predictions = rng.normal(size=(n_samples, 1))
792
+ loss.gradient_hessian(
793
+ y_true=y,
794
+ raw_prediction=raw_predictions,
795
+ sample_weight=sample_weight,
796
+ gradient_out=gradients,
797
+ hessian_out=hessians,
798
+ n_threads=n_threads,
799
+ )
800
+
801
+ # build sum_sample_weight which contains the sum of the sample weights at
802
+ # each bin (for each feature). This must be equal to the sum_hessians
803
+ # field of the corresponding histogram
804
+ sum_sw = np.zeros(shape=(n_features, bin_mapper.n_bins))
805
+ for feature_idx in range(n_features):
806
+ for sample_idx in range(n_samples):
807
+ sum_sw[feature_idx, X_binned[sample_idx, feature_idx]] += sample_weight[
808
+ sample_idx
809
+ ]
810
+
811
+ # Build histogram
812
+ grower = TreeGrower(
813
+ X_binned, gradients[:, 0], hessians[:, 0], n_bins=bin_mapper.n_bins
814
+ )
815
+ histograms = grower.histogram_builder.compute_histograms_brute(
816
+ grower.root.sample_indices
817
+ )
818
+
819
+ for feature_idx in range(n_features):
820
+ for bin_idx in range(bin_mapper.n_bins):
821
+ assert histograms[feature_idx, bin_idx]["sum_hessians"] == (
822
+ pytest.approx(sum_sw[feature_idx, bin_idx], rel=1e-5)
823
+ )
824
+
825
+
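The invariant above holds because a constant-hessian loss has a per-sample hessian of 1, so the weighted hessian of sample i is just sample_weight[i]; summing hessians within a bin therefore sums the sample weights. A toy NumPy sketch of that bookkeeping, with illustrative values:

    import numpy as np

    sample_weight = np.array([0.5, 2.0, 1.5, 1.0])
    binned_feature = np.array([0, 1, 1, 0])        # bin index of each sample
    unit_hessians = np.ones_like(sample_weight)    # constant-hessian loss
    weighted_hessians = unit_hessians * sample_weight

    sum_hessians_per_bin = np.bincount(binned_feature, weights=weighted_hessians)
    sum_weights_per_bin = np.bincount(binned_feature, weights=sample_weight)
    # Both are [1.5, 3.5]: per-bin hessian sums equal per-bin weight sums.
    assert np.allclose(sum_hessians_per_bin, sum_weights_per_bin)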
826
+ def test_max_depth_max_leaf_nodes():
827
+ # Non regression test for
828
+ # https://github.com/scikit-learn/scikit-learn/issues/16179
829
+ # there was a bug when the max_depth and the max_leaf_nodes criteria were
830
+ # met at the same time, which would lead to max_leaf_nodes not being
831
+ # respected.
832
+ X, y = make_classification(random_state=0)
833
+ est = HistGradientBoostingClassifier(max_depth=2, max_leaf_nodes=3, max_iter=1).fit(
834
+ X, y
835
+ )
836
+ tree = est._predictors[0][0]
837
+ assert tree.get_max_depth() == 2
838
+ assert tree.get_n_leaf_nodes() == 3 # would be 4 prior to bug fix
839
+
840
+
841
+ def test_early_stopping_on_test_set_with_warm_start():
842
+ # Non regression test for #16661 where second fit fails with
843
+ # warm_start=True, early_stopping is on, and no validation set
844
+ X, y = make_classification(random_state=0)
845
+ gb = HistGradientBoostingClassifier(
846
+ max_iter=1,
847
+ scoring="loss",
848
+ warm_start=True,
849
+ early_stopping=True,
850
+ n_iter_no_change=1,
851
+ validation_fraction=None,
852
+ )
853
+
854
+ gb.fit(X, y)
855
+ # does not raise on second call
856
+ gb.set_params(max_iter=2)
857
+ gb.fit(X, y)
858
+
859
+
860
+ def test_early_stopping_with_sample_weights(monkeypatch):
861
+ """Check that sample weights is passed in to the scorer and _raw_predict is not
862
+ called."""
863
+
864
+ mock_scorer = Mock(side_effect=get_scorer("neg_median_absolute_error"))
865
+
866
+ def mock_check_scoring(estimator, scoring):
867
+ assert scoring == "neg_median_absolute_error"
868
+ return mock_scorer
869
+
870
+ monkeypatch.setattr(
871
+ sklearn.ensemble._hist_gradient_boosting.gradient_boosting,
872
+ "check_scoring",
873
+ mock_check_scoring,
874
+ )
875
+
876
+ X, y = make_regression(random_state=0)
877
+ sample_weight = np.ones_like(y)
878
+ hist = HistGradientBoostingRegressor(
879
+ max_iter=2,
880
+ early_stopping=True,
881
+ random_state=0,
882
+ scoring="neg_median_absolute_error",
883
+ )
884
+ mock_raw_predict = Mock(side_effect=hist._raw_predict)
885
+ hist._raw_predict = mock_raw_predict
886
+ hist.fit(X, y, sample_weight=sample_weight)
887
+
888
+ # _raw_predict should never be called with scoring as a string
889
+ assert mock_raw_predict.call_count == 0
890
+
891
+ # The scorer is called twice (train and val) for the baseline score, and twice
892
+ # per iteration (train and val) after that. So 6 times in total for `max_iter=2`.
893
+ assert mock_scorer.call_count == 6
894
+ for arg_list in mock_scorer.call_args_list:
895
+ assert "sample_weight" in arg_list[1]
896
+
897
+
898
+ def test_raw_predict_is_called_with_custom_scorer():
899
+ """Custom scorer will still call _raw_predict."""
900
+
901
+ mock_scorer = Mock(side_effect=get_scorer("neg_median_absolute_error"))
902
+
903
+ X, y = make_regression(random_state=0)
904
+ hist = HistGradientBoostingRegressor(
905
+ max_iter=2,
906
+ early_stopping=True,
907
+ random_state=0,
908
+ scoring=mock_scorer,
909
+ )
910
+ mock_raw_predict = Mock(side_effect=hist._raw_predict)
911
+ hist._raw_predict = mock_raw_predict
912
+ hist.fit(X, y)
913
+
914
+ # `_raw_predict` and the scorer are each called twice (train and val) for the baseline score,
915
+ # and twice per iteration (train and val) after that. So 6 times in total for
916
+ # `max_iter=2`.
917
+ assert mock_raw_predict.call_count == 6
918
+ assert mock_scorer.call_count == 6
919
+
920
+
921
+ @pytest.mark.parametrize(
922
+ "Est", (HistGradientBoostingClassifier, HistGradientBoostingRegressor)
923
+ )
924
+ def test_single_node_trees(Est):
925
+ # Make sure it's still possible to build single-node trees. In that case
926
+ # the value of the root is set to 0. That's a correct value: if the tree is
927
+ # single-node that's because min_gain_to_split is not respected right from
928
+ # the root, so we don't want the tree to have any impact on the
929
+ # predictions.
930
+
931
+ X, y = make_classification(random_state=0)
932
+ y[:] = 1 # constant target will lead to a single root node
933
+
934
+ est = Est(max_iter=20)
935
+ est.fit(X, y)
936
+
937
+ assert all(len(predictor[0].nodes) == 1 for predictor in est._predictors)
938
+ assert all(predictor[0].nodes[0]["value"] == 0 for predictor in est._predictors)
939
+ # Still gives correct predictions thanks to the baseline prediction
940
+ assert_allclose(est.predict(X), y)
941
+
942
+
943
+ @pytest.mark.parametrize(
944
+ "Est, loss, X, y",
945
+ [
946
+ (
947
+ HistGradientBoostingClassifier,
948
+ HalfBinomialLoss(sample_weight=None),
949
+ X_classification,
950
+ y_classification,
951
+ ),
952
+ (
953
+ HistGradientBoostingRegressor,
954
+ HalfSquaredError(sample_weight=None),
955
+ X_regression,
956
+ y_regression,
957
+ ),
958
+ ],
959
+ )
960
+ def test_custom_loss(Est, loss, X, y):
961
+ est = Est(loss=loss, max_iter=20)
962
+ est.fit(X, y)
963
+
964
+
965
+ @pytest.mark.parametrize(
966
+ "HistGradientBoosting, X, y",
967
+ [
968
+ (HistGradientBoostingClassifier, X_classification, y_classification),
969
+ (HistGradientBoostingRegressor, X_regression, y_regression),
970
+ (
971
+ HistGradientBoostingClassifier,
972
+ X_multi_classification,
973
+ y_multi_classification,
974
+ ),
975
+ ],
976
+ )
977
+ def test_staged_predict(HistGradientBoosting, X, y):
978
+ # Test whether staged predictor eventually gives
979
+ # the same prediction.
980
+ X_train, X_test, y_train, y_test = train_test_split(
981
+ X, y, test_size=0.5, random_state=0
982
+ )
983
+ gb = HistGradientBoosting(max_iter=10)
984
+
985
+ # test raise NotFittedError if not fitted
986
+ with pytest.raises(NotFittedError):
987
+ next(gb.staged_predict(X_test))
988
+
989
+ gb.fit(X_train, y_train)
990
+
991
+ # test if the staged predictions of each iteration
992
+ # are equal to the corresponding predictions of the same estimator
993
+ # trained from scratch.
994
+ # this also tests the limit case when max_iter = 1
995
+ method_names = (
996
+ ["predict"]
997
+ if is_regressor(gb)
998
+ else ["predict", "predict_proba", "decision_function"]
999
+ )
1000
+ for method_name in method_names:
1001
+ staged_method = getattr(gb, "staged_" + method_name)
1002
+ staged_predictions = list(staged_method(X_test))
1003
+ assert len(staged_predictions) == gb.n_iter_
1004
+ for n_iter, staged_predictions in enumerate(staged_method(X_test), 1):
1005
+ aux = HistGradientBoosting(max_iter=n_iter)
1006
+ aux.fit(X_train, y_train)
1007
+ pred_aux = getattr(aux, method_name)(X_test)
1008
+
1009
+ assert_allclose(staged_predictions, pred_aux)
1010
+ assert staged_predictions.shape == pred_aux.shape
1011
+
1012
+
1013
+ @pytest.mark.parametrize("insert_missing", [False, True])
1014
+ @pytest.mark.parametrize(
1015
+ "Est", (HistGradientBoostingRegressor, HistGradientBoostingClassifier)
1016
+ )
1017
+ @pytest.mark.parametrize("bool_categorical_parameter", [True, False])
1018
+ @pytest.mark.parametrize("missing_value", [np.nan, -1])
1019
+ def test_unknown_categories_nan(
1020
+ insert_missing, Est, bool_categorical_parameter, missing_value
1021
+ ):
1022
+ # Make sure no error is raised at predict if a category wasn't seen during
1023
+ # fit. We also make sure they're treated as nans.
1024
+
1025
+ rng = np.random.RandomState(0)
1026
+ n_samples = 1000
1027
+ f1 = rng.rand(n_samples)
1028
+ f2 = rng.randint(4, size=n_samples)
1029
+ X = np.c_[f1, f2]
1030
+ y = np.zeros(shape=n_samples)
1031
+ y[X[:, 1] % 2 == 0] = 1
1032
+
1033
+ if bool_categorical_parameter:
1034
+ categorical_features = [False, True]
1035
+ else:
1036
+ categorical_features = [1]
1037
+
1038
+ if insert_missing:
1039
+ mask = rng.binomial(1, 0.01, size=X.shape).astype(bool)
1040
+ assert mask.sum() > 0
1041
+ X[mask] = missing_value
1042
+
1043
+ est = Est(max_iter=20, categorical_features=categorical_features).fit(X, y)
1044
+ assert_array_equal(est.is_categorical_, [False, True])
1045
+
1046
+ # Make sure no error is raised on unknown categories and nans
1047
+ # unknown categories will be treated as nans
1048
+ X_test = np.zeros((10, X.shape[1]), dtype=float)
1049
+ X_test[:5, 1] = 30
1050
+ X_test[5:, 1] = missing_value
1051
+ assert len(np.unique(est.predict(X_test))) == 1
1052
+
1053
+
1054
+ def test_categorical_encoding_strategies():
1055
+ # Check native categorical handling vs different encoding strategies. We
1056
+ # make sure that native encoding needs only 1 split to achieve a perfect
1057
+ # prediction on a simple dataset. In contrast, OneHotEncoded data needs
1058
+ # more depth / splits, and treating categories as ordered (just using
1059
+ # OrdinalEncoder) requires even more depth.
1060
+
1061
+ # dataset with one random continuous feature, and one categorical feature
1062
+ # with values in [0, 5], e.g. from an OrdinalEncoder.
1063
+ # class == 1 iff categorical value in {0, 2, 4}
1064
+ rng = np.random.RandomState(0)
1065
+ n_samples = 10_000
1066
+ f1 = rng.rand(n_samples)
1067
+ f2 = rng.randint(6, size=n_samples)
1068
+ X = np.c_[f1, f2]
1069
+ y = np.zeros(shape=n_samples)
1070
+ y[X[:, 1] % 2 == 0] = 1
1071
+
1072
+ # make sure dataset is balanced so that the baseline_prediction doesn't
1073
+ # influence predictions too much with max_iter = 1
1074
+ assert 0.49 < y.mean() < 0.51
1075
+
1076
+ native_cat_specs = [
1077
+ [False, True],
1078
+ [1],
1079
+ ]
1080
+ try:
1081
+ import pandas as pd
1082
+
1083
+ X = pd.DataFrame(X, columns=["f_0", "f_1"])
1084
+ native_cat_specs.append(["f_1"])
1085
+ except ImportError:
1086
+ pass
1087
+
1088
+ for native_cat_spec in native_cat_specs:
1089
+ clf_cat = HistGradientBoostingClassifier(
1090
+ max_iter=1, max_depth=1, categorical_features=native_cat_spec
1091
+ )
1092
+ clf_cat.fit(X, y)
1093
+
1094
+ # Using native categorical encoding, we get perfect predictions with just
1095
+ # one split
1096
+ assert cross_val_score(clf_cat, X, y).mean() == 1
1097
+
1098
+ # quick sanity check for the bitset: 0, 2, 4 = 2**0 + 2**2 + 2**4 = 21
1099
+ expected_left_bitset = [21, 0, 0, 0, 0, 0, 0, 0]
1100
+ left_bitset = clf_cat.fit(X, y)._predictors[0][0].raw_left_cat_bitsets[0]
1101
+ assert_array_equal(left_bitset, expected_left_bitset)
1102
+
1103
+ # Treating categories as ordered, we need more depth / more splits to get
1104
+ # the same predictions
1105
+ clf_no_cat = HistGradientBoostingClassifier(
1106
+ max_iter=1, max_depth=4, categorical_features=None
1107
+ )
1108
+ assert cross_val_score(clf_no_cat, X, y).mean() < 0.9
1109
+
1110
+ clf_no_cat.set_params(max_depth=5)
1111
+ assert cross_val_score(clf_no_cat, X, y).mean() == 1
1112
+
1113
+ # Using OHEd data, we need less splits than with pure OEd data, but we
1114
+ # still need more splits than with the native categorical splits
1115
+ ct = make_column_transformer(
1116
+ (OneHotEncoder(sparse_output=False), [1]), remainder="passthrough"
1117
+ )
1118
+ X_ohe = ct.fit_transform(X)
1119
+ clf_no_cat.set_params(max_depth=2)
1120
+ assert cross_val_score(clf_no_cat, X_ohe, y).mean() < 0.9
1121
+
1122
+ clf_no_cat.set_params(max_depth=3)
1123
+ assert cross_val_score(clf_no_cat, X_ohe, y).mean() == 1
1124
+
1125
+
1126
+ @pytest.mark.parametrize(
1127
+ "Est", (HistGradientBoostingClassifier, HistGradientBoostingRegressor)
1128
+ )
1129
+ @pytest.mark.parametrize(
1130
+ "categorical_features, monotonic_cst, expected_msg",
1131
+ [
1132
+ (
1133
+ [b"hello", b"world"],
1134
+ None,
1135
+ re.escape(
1136
+ "categorical_features must be an array-like of bool, int or str, "
1137
+ "got: bytes40."
1138
+ ),
1139
+ ),
1140
+ (
1141
+ np.array([b"hello", 1.3], dtype=object),
1142
+ None,
1143
+ re.escape(
1144
+ "categorical_features must be an array-like of bool, int or str, "
1145
+ "got: bytes, float."
1146
+ ),
1147
+ ),
1148
+ (
1149
+ [0, -1],
1150
+ None,
1151
+ re.escape(
1152
+ "categorical_features set as integer indices must be in "
1153
+ "[0, n_features - 1]"
1154
+ ),
1155
+ ),
1156
+ (
1157
+ [True, True, False, False, True],
1158
+ None,
1159
+ re.escape(
1160
+ "categorical_features set as a boolean mask must have shape "
1161
+ "(n_features,)"
1162
+ ),
1163
+ ),
1164
+ (
1165
+ [True, True, False, False],
1166
+ [0, -1, 0, 1],
1167
+ "Categorical features cannot have monotonic constraints",
1168
+ ),
1169
+ ],
1170
+ )
1171
+ def test_categorical_spec_errors(
1172
+ Est, categorical_features, monotonic_cst, expected_msg
1173
+ ):
1174
+ # Test errors when categories are specified incorrectly
1175
+ n_samples = 100
1176
+ X, y = make_classification(random_state=0, n_features=4, n_samples=n_samples)
1177
+ rng = np.random.RandomState(0)
1178
+ X[:, 0] = rng.randint(0, 10, size=n_samples)
1179
+ X[:, 1] = rng.randint(0, 10, size=n_samples)
1180
+ est = Est(categorical_features=categorical_features, monotonic_cst=monotonic_cst)
1181
+
1182
+ with pytest.raises(ValueError, match=expected_msg):
1183
+ est.fit(X, y)
1184
+
1185
+
1186
+ @pytest.mark.parametrize(
1187
+ "Est", (HistGradientBoostingClassifier, HistGradientBoostingRegressor)
1188
+ )
1189
+ def test_categorical_spec_errors_with_feature_names(Est):
1190
+ pd = pytest.importorskip("pandas")
1191
+ n_samples = 10
1192
+ X = pd.DataFrame(
1193
+ {
1194
+ "f0": range(n_samples),
1195
+ "f1": range(n_samples),
1196
+ "f2": [1.0] * n_samples,
1197
+ }
1198
+ )
1199
+ y = [0, 1] * (n_samples // 2)
1200
+
1201
+ est = Est(categorical_features=["f0", "f1", "f3"])
1202
+ expected_msg = re.escape(
1203
+ "categorical_features has a item value 'f3' which is not a valid "
1204
+ "feature name of the training data."
1205
+ )
1206
+ with pytest.raises(ValueError, match=expected_msg):
1207
+ est.fit(X, y)
1208
+
1209
+ est = Est(categorical_features=["f0", "f1"])
1210
+ expected_msg = re.escape(
1211
+ "categorical_features should be passed as an array of integers or "
1212
+ "as a boolean mask when the model is fitted on data without feature "
1213
+ "names."
1214
+ )
1215
+ with pytest.raises(ValueError, match=expected_msg):
1216
+ est.fit(X.to_numpy(), y)
1217
+
1218
+
1219
+ @pytest.mark.parametrize(
1220
+ "Est", (HistGradientBoostingClassifier, HistGradientBoostingRegressor)
1221
+ )
1222
+ @pytest.mark.parametrize("categorical_features", ([False, False], []))
1223
+ @pytest.mark.parametrize("as_array", (True, False))
1224
+ def test_categorical_spec_no_categories(Est, categorical_features, as_array):
1225
+ # Make sure we can properly detect that no categorical features are present
1226
+ # even if the categorical_features parameter is not None
1227
+ X = np.arange(10).reshape(5, 2)
1228
+ y = np.arange(5)
1229
+ if as_array:
1230
+ categorical_features = np.asarray(categorical_features)
1231
+ est = Est(categorical_features=categorical_features).fit(X, y)
1232
+ assert est.is_categorical_ is None
1233
+
1234
+
1235
+ @pytest.mark.parametrize(
1236
+ "Est", (HistGradientBoostingClassifier, HistGradientBoostingRegressor)
1237
+ )
1238
+ @pytest.mark.parametrize(
1239
+ "use_pandas, feature_name", [(False, "at index 0"), (True, "'f0'")]
1240
+ )
1241
+ def test_categorical_bad_encoding_errors(Est, use_pandas, feature_name):
1242
+ # Test errors when categories are encoded incorrectly
1243
+
1244
+ gb = Est(categorical_features=[True], max_bins=2)
1245
+
1246
+ if use_pandas:
1247
+ pd = pytest.importorskip("pandas")
1248
+ X = pd.DataFrame({"f0": [0, 1, 2]})
1249
+ else:
1250
+ X = np.array([[0, 1, 2]]).T
1251
+ y = np.arange(3)
1252
+ msg = (
1253
+ f"Categorical feature {feature_name} is expected to have a "
1254
+ "cardinality <= 2 but actually has a cardinality of 3."
1255
+ )
1256
+ with pytest.raises(ValueError, match=msg):
1257
+ gb.fit(X, y)
1258
+
1259
+ # nans are ignored in the counts
1260
+ X = np.array([[0, 1, np.nan]]).T
1261
+ y = np.arange(3)
1262
+ gb.fit(X, y)
1263
+
1264
+
1265
+ @pytest.mark.parametrize(
1266
+ "Est", (HistGradientBoostingClassifier, HistGradientBoostingRegressor)
1267
+ )
1268
+ def test_uint8_predict(Est):
1269
+ # Non regression test for
1270
+ # https://github.com/scikit-learn/scikit-learn/issues/18408
1271
+ # Make sure X can be of dtype uint8 (i.e. X_BINNED_DTYPE) in predict. It
1272
+ # will be converted to X_DTYPE.
1273
+
1274
+ rng = np.random.RandomState(0)
1275
+
1276
+ X = rng.randint(0, 100, size=(10, 2)).astype(np.uint8)
1277
+ y = rng.randint(0, 2, size=10).astype(np.uint8)
1278
+ est = Est()
1279
+ est.fit(X, y)
1280
+ est.predict(X)
1281
+
1282
+
1283
+ @pytest.mark.parametrize(
1284
+ "interaction_cst, n_features, result",
1285
+ [
1286
+ (None, 931, None),
1287
+ ([{0, 1}], 2, [{0, 1}]),
1288
+ ("pairwise", 2, [{0, 1}]),
1289
+ ("pairwise", 4, [{0, 1}, {0, 2}, {0, 3}, {1, 2}, {1, 3}, {2, 3}]),
1290
+ ("no_interactions", 2, [{0}, {1}]),
1291
+ ("no_interactions", 4, [{0}, {1}, {2}, {3}]),
1292
+ ([(1, 0), [5, 1]], 6, [{0, 1}, {1, 5}, {2, 3, 4}]),
1293
+ ],
1294
+ )
1295
+ def test_check_interaction_cst(interaction_cst, n_features, result):
1296
+ """Check that _check_interaction_cst returns the expected list of sets"""
1297
+ est = HistGradientBoostingRegressor()
1298
+ est.set_params(interaction_cst=interaction_cst)
1299
+ assert est._check_interaction_cst(n_features) == result
1300
+
1301
+
1302
+ def test_interaction_cst_numerically():
1303
+ """Check that interaction constraints have no forbidden interactions."""
1304
+ rng = np.random.RandomState(42)
1305
+ n_samples = 1000
1306
+ X = rng.uniform(size=(n_samples, 2))
1307
+ # Construct y with a strong interaction term
1308
+ # y = x0 + x1 + 5 * x0 * x1
1309
+ y = np.hstack((X, 5 * X[:, [0]] * X[:, [1]])).sum(axis=1)
1310
+
1311
+ est = HistGradientBoostingRegressor(random_state=42)
1312
+ est.fit(X, y)
1313
+ est_no_interactions = HistGradientBoostingRegressor(
1314
+ interaction_cst=[{0}, {1}], random_state=42
1315
+ )
1316
+ est_no_interactions.fit(X, y)
1317
+
1318
+ delta = 0.25
1319
+ # Make sure we do not extrapolate out of the training set as tree-based estimators
1320
+ # are very bad in doing so.
1321
+ X_test = X[(X[:, 0] < 1 - delta) & (X[:, 1] < 1 - delta)]
1322
+ X_delta_d_0 = X_test + [delta, 0]
1323
+ X_delta_0_d = X_test + [0, delta]
1324
+ X_delta_d_d = X_test + [delta, delta]
1325
+
1326
+ # Note: For the y from above as a function of x0 and x1, we have
1327
+ # y(x0+d, x1+d) = y(x0, x1) + 5 * d * (2/5 + x0 + x1) + 5 * d**2
1328
+ # y(x0+d, x1) = y(x0, x1) + 5 * d * (1/5 + x1)
1329
+ # y(x0, x1+d) = y(x0, x1) + 5 * d * (1/5 + x0)
1330
+ # Without interaction constraints, we would expect a result of 5 * d**2 for the
1331
+ # following expression, but zero with constraints in place.
1332
+ assert_allclose(
1333
+ est_no_interactions.predict(X_delta_d_d)
1334
+ + est_no_interactions.predict(X_test)
1335
+ - est_no_interactions.predict(X_delta_d_0)
1336
+ - est_no_interactions.predict(X_delta_0_d),
1337
+ 0,
1338
+ atol=1e-12,
1339
+ )
1340
+
1341
+ # Correct result of the expressions is 5 * delta**2. But this is hard to achieve by
1342
+ # a fitted tree-based model. However, with 100 iterations the expression should
1343
+ # at least be positive!
1344
+ assert np.all(
1345
+ est.predict(X_delta_d_d)
1346
+ + est.predict(X_test)
1347
+ - est.predict(X_delta_d_0)
1348
+ - est.predict(X_delta_0_d)
1349
+ > 0.01
1350
+ )
1351
+
1352
+
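The asserted quantity is the mixed second difference f(x0+d, x1+d) + f(x0, x1) - f(x0+d, x1) - f(x0, x1+d); for the true target it equals exactly 5 * d**2, and it vanishes for any additive function, which is what the constrained model must mimic. A short NumPy check on the ground-truth function (illustrative only):

    import numpy as np

    def f(x0, x1):
        # interacting target used in the test
        return x0 + x1 + 5 * x0 * x1

    def g(x0, x1):
        # purely additive target: no interaction
        return x0 + x1

    d = 0.25
    x0, x1 = np.random.RandomState(0).uniform(size=(2, 5))
    mixed_diff = f(x0 + d, x1 + d) + f(x0, x1) - f(x0 + d, x1) - f(x0, x1 + d)
    # The linear terms cancel, leaving 5 * d**2 everywhere ...
    assert np.allclose(mixed_diff, 5 * d**2)
    # ... while the mixed difference of an additive function is identically zero.
    assert np.allclose(g(x0 + d, x1 + d) + g(x0, x1) - g(x0 + d, x1) - g(x0, x1 + d), 0)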
1353
+ def test_no_user_warning_with_scoring():
1354
+ """Check that no UserWarning is raised when scoring is set.
1355
+
1356
+ Non-regression test for #22907.
1357
+ """
1358
+ pd = pytest.importorskip("pandas")
1359
+ X, y = make_regression(n_samples=50, random_state=0)
1360
+ X_df = pd.DataFrame(X, columns=[f"col{i}" for i in range(X.shape[1])])
1361
+
1362
+ est = HistGradientBoostingRegressor(
1363
+ random_state=0, scoring="neg_mean_absolute_error", early_stopping=True
1364
+ )
1365
+ with warnings.catch_warnings():
1366
+ warnings.simplefilter("error", UserWarning)
1367
+ est.fit(X_df, y)
1368
+
1369
+
1370
+ def test_class_weights():
1371
+ """High level test to check class_weights."""
1372
+ n_samples = 255
1373
+ n_features = 2
1374
+
1375
+ X, y = make_classification(
1376
+ n_samples=n_samples,
1377
+ n_features=n_features,
1378
+ n_informative=n_features,
1379
+ n_redundant=0,
1380
+ n_clusters_per_class=1,
1381
+ n_classes=2,
1382
+ random_state=0,
1383
+ )
1384
+ y_is_1 = y == 1
1385
+
1386
+ # class_weight is the same as sample weights with the corresponding class
1387
+ clf = HistGradientBoostingClassifier(
1388
+ min_samples_leaf=2, random_state=0, max_depth=2
1389
+ )
1390
+ sample_weight = np.ones(shape=(n_samples))
1391
+ sample_weight[y_is_1] = 3.0
1392
+ clf.fit(X, y, sample_weight=sample_weight)
1393
+
1394
+ class_weight = {0: 1.0, 1: 3.0}
1395
+ clf_class_weighted = clone(clf).set_params(class_weight=class_weight)
1396
+ clf_class_weighted.fit(X, y)
1397
+
1398
+ assert_allclose(clf.decision_function(X), clf_class_weighted.decision_function(X))
1399
+
1400
+ # Check that sample_weight and class_weight are multiplicative
1401
+ clf.fit(X, y, sample_weight=sample_weight**2)
1402
+ clf_class_weighted.fit(X, y, sample_weight=sample_weight)
1403
+ assert_allclose(clf.decision_function(X), clf_class_weighted.decision_function(X))
1404
+
1405
+ # Make imbalanced dataset
1406
+ X_imb = np.concatenate((X[~y_is_1], X[y_is_1][:10]))
1407
+ y_imb = np.concatenate((y[~y_is_1], y[y_is_1][:10]))
1408
+
1409
+ # class_weight="balanced" is the same as sample_weights to be
1410
+ # inversely proportional to n_samples / (n_classes * np.bincount(y))
1411
+ clf_balanced = clone(clf).set_params(class_weight="balanced")
1412
+ clf_balanced.fit(X_imb, y_imb)
1413
+
1414
+ class_weight = y_imb.shape[0] / (2 * np.bincount(y_imb))
1415
+ sample_weight = class_weight[y_imb]
1416
+ clf_sample_weight = clone(clf).set_params(class_weight=None)
1417
+ clf_sample_weight.fit(X_imb, y_imb, sample_weight=sample_weight)
1418
+
1419
+ assert_allclose(
1420
+ clf_balanced.decision_function(X_imb),
1421
+ clf_sample_weight.decision_function(X_imb),
1422
+ )
1423
+
1424
+
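The "balanced" rule used above assigns each class the weight n_samples / (n_classes * bincount(y)), which is also what sklearn.utils.class_weight.compute_class_weight returns. A minimal sketch with an illustrative imbalanced target:

    import numpy as np
    from sklearn.utils.class_weight import compute_class_weight

    y_imb = np.array([0] * 90 + [1] * 10)            # illustrative imbalance
    manual = y_imb.shape[0] / (2 * np.bincount(y_imb))  # [~0.556, 5.0]
    helper = compute_class_weight("balanced", classes=np.array([0, 1]), y=y_imb)
    assert np.allclose(manual, helper)
    # Per-sample weights are then just a lookup by class label.
    sample_weight = manual[y_imb]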
1425
+ def test_unknown_category_that_are_negative():
1426
+ """Check that unknown categories that are negative does not error.
1427
+
1428
+ Non-regression test for #24274.
1429
+ """
1430
+ rng = np.random.RandomState(42)
1431
+ n_samples = 1000
1432
+ X = np.c_[rng.rand(n_samples), rng.randint(4, size=n_samples)]
1433
+ y = np.zeros(shape=n_samples)
1434
+ y[X[:, 1] % 2 == 0] = 1
1435
+
1436
+ hist = HistGradientBoostingRegressor(
1437
+ random_state=0,
1438
+ categorical_features=[False, True],
1439
+ max_iter=10,
1440
+ ).fit(X, y)
1441
+
1442
+ # Check that negative values from the second column are treated like a
1443
+ # missing category
1444
+ X_test_neg = np.asarray([[1, -2], [3, -4]])
1445
+ X_test_nan = np.asarray([[1, np.nan], [3, np.nan]])
1446
+
1447
+ assert_allclose(hist.predict(X_test_neg), hist.predict(X_test_nan))
1448
+
1449
+
1450
+ @pytest.mark.parametrize("dataframe_lib", ["pandas", "polars"])
1451
+ @pytest.mark.parametrize(
1452
+ "HistGradientBoosting",
1453
+ [HistGradientBoostingClassifier, HistGradientBoostingRegressor],
1454
+ )
1455
+ def test_dataframe_categorical_results_same_as_ndarray(
1456
+ dataframe_lib, HistGradientBoosting
1457
+ ):
1458
+ """Check that pandas categorical give the same results as ndarray."""
1459
+ pytest.importorskip(dataframe_lib)
1460
+
1461
+ rng = np.random.RandomState(42)
1462
+ n_samples = 5_000
1463
+ n_cardinality = 50
1464
+ max_bins = 100
1465
+ f_num = rng.rand(n_samples)
1466
+ f_cat = rng.randint(n_cardinality, size=n_samples)
1467
+
1468
+ # Make f_cat an informative feature
1469
+ y = (f_cat % 3 == 0) & (f_num > 0.2)
1470
+
1471
+ X = np.c_[f_num, f_cat]
1472
+ f_cat = [f"cat{c:0>3}" for c in f_cat]
1473
+ X_df = _convert_container(
1474
+ np.asarray([f_num, f_cat]).T,
1475
+ dataframe_lib,
1476
+ ["f_num", "f_cat"],
1477
+ categorical_feature_names=["f_cat"],
1478
+ )
1479
+
1480
+ X_train, X_test, X_train_df, X_test_df, y_train, y_test = train_test_split(
1481
+ X, X_df, y, random_state=0
1482
+ )
1483
+
1484
+ hist_kwargs = dict(max_iter=10, max_bins=max_bins, random_state=0)
1485
+ hist_np = HistGradientBoosting(categorical_features=[False, True], **hist_kwargs)
1486
+ hist_np.fit(X_train, y_train)
1487
+
1488
+ hist_pd = HistGradientBoosting(categorical_features="from_dtype", **hist_kwargs)
1489
+ hist_pd.fit(X_train_df, y_train)
1490
+
1491
+ # Check categories are correct and sorted
1492
+ categories = hist_pd._preprocessor.named_transformers_["encoder"].categories_[0]
1493
+ assert_array_equal(categories, np.unique(f_cat))
1494
+
1495
+ assert len(hist_np._predictors) == len(hist_pd._predictors)
1496
+ for predictor_1, predictor_2 in zip(hist_np._predictors, hist_pd._predictors):
1497
+ assert len(predictor_1[0].nodes) == len(predictor_2[0].nodes)
1498
+
1499
+ score_np = hist_np.score(X_test, y_test)
1500
+ score_pd = hist_pd.score(X_test_df, y_test)
1501
+ assert score_np == pytest.approx(score_pd)
1502
+ assert_allclose(hist_np.predict(X_test), hist_pd.predict(X_test_df))
1503
+
1504
+
1505
+ @pytest.mark.parametrize("dataframe_lib", ["pandas", "polars"])
1506
+ @pytest.mark.parametrize(
1507
+ "HistGradientBoosting",
1508
+ [HistGradientBoostingClassifier, HistGradientBoostingRegressor],
1509
+ )
1510
+ def test_dataframe_categorical_errors(dataframe_lib, HistGradientBoosting):
1511
+ """Check error cases for pandas categorical feature."""
1512
+ pytest.importorskip(dataframe_lib)
1513
+ msg = "Categorical feature 'f_cat' is expected to have a cardinality <= 16"
1514
+ hist = HistGradientBoosting(categorical_features="from_dtype", max_bins=16)
1515
+
1516
+ rng = np.random.RandomState(42)
1517
+ f_cat = rng.randint(0, high=100, size=100).astype(str)
1518
+ X_df = _convert_container(
1519
+ f_cat[:, None], dataframe_lib, ["f_cat"], categorical_feature_names=["f_cat"]
1520
+ )
1521
+ y = rng.randint(0, high=2, size=100)
1522
+
1523
+ with pytest.raises(ValueError, match=msg):
1524
+ hist.fit(X_df, y)
1525
+
1526
+
1527
+ @pytest.mark.parametrize("dataframe_lib", ["pandas", "polars"])
1528
+ def test_categorical_different_order_same_model(dataframe_lib):
1529
+ """Check that the order of the categorical gives same model."""
1530
+ pytest.importorskip(dataframe_lib)
1531
+ rng = np.random.RandomState(42)
1532
+ n_samples = 1_000
1533
+ f_ints = rng.randint(low=0, high=2, size=n_samples)
1534
+
1535
+ # Construct a target with some noise
1536
+ y = f_ints.copy()
1537
+ flipped = rng.choice([True, False], size=n_samples, p=[0.1, 0.9])
1538
+ y[flipped] = 1 - y[flipped]
1539
+
1540
+ # Construct categorical where 0 -> A and 1 -> B and 1 -> A and 0 -> B
1541
+ f_cat_a_b = np.asarray(["A", "B"])[f_ints]
1542
+ f_cat_b_a = np.asarray(["B", "A"])[f_ints]
1543
+ df_a_b = _convert_container(
1544
+ f_cat_a_b[:, None],
1545
+ dataframe_lib,
1546
+ ["f_cat"],
1547
+ categorical_feature_names=["f_cat"],
1548
+ )
1549
+ df_b_a = _convert_container(
1550
+ f_cat_b_a[:, None],
1551
+ dataframe_lib,
1552
+ ["f_cat"],
1553
+ categorical_feature_names=["f_cat"],
1554
+ )
1555
+
1556
+ hist_a_b = HistGradientBoostingClassifier(
1557
+ categorical_features="from_dtype", random_state=0
1558
+ )
1559
+ hist_b_a = HistGradientBoostingClassifier(
1560
+ categorical_features="from_dtype", random_state=0
1561
+ )
1562
+
1563
+ hist_a_b.fit(df_a_b, y)
1564
+ hist_b_a.fit(df_b_a, y)
1565
+
1566
+ assert len(hist_a_b._predictors) == len(hist_b_a._predictors)
1567
+ for predictor_1, predictor_2 in zip(hist_a_b._predictors, hist_b_a._predictors):
1568
+ assert len(predictor_1[0].nodes) == len(predictor_2[0].nodes)
1569
+
1570
+
1571
+ # TODO(1.6): Remove warning and change default in 1.6
1572
+ def test_categorical_features_warn():
1573
+ """Raise warning when there are categorical features in the input DataFrame.
1574
+
1575
+ This is not tested for polars because polars categories must always be
1576
+ strings and strings can only be handled as categories. Therefore the
1577
+ situation in which a categorical column is currently being treated as
1578
+ numbers and in the future will be treated as categories cannot occur with
1579
+ polars.
1580
+ """
1581
+ pd = pytest.importorskip("pandas")
1582
+ X = pd.DataFrame({"a": pd.Series([1, 2, 3], dtype="category"), "b": [4, 5, 6]})
1583
+ y = [0, 1, 0]
1584
+ hist = HistGradientBoostingClassifier(random_state=0)
1585
+
1586
+ msg = "The categorical_features parameter will change to 'from_dtype' in v1.6"
1587
+ with pytest.warns(FutureWarning, match=msg):
1588
+ hist.fit(X, y)
1589
+
1590
+
1591
+ def get_different_bitness_node_ndarray(node_ndarray):
1592
+ new_dtype_for_indexing_fields = np.int64 if _IS_32BIT else np.int32
1593
+
1594
+ # field names in Node struct with np.intp types (see
1595
+ # sklearn/ensemble/_hist_gradient_boosting/common.pyx)
1596
+ indexing_field_names = ["feature_idx"]
1597
+
1598
+ new_dtype_dict = {
1599
+ name: dtype for name, (dtype, _) in node_ndarray.dtype.fields.items()
1600
+ }
1601
+ for name in indexing_field_names:
1602
+ new_dtype_dict[name] = new_dtype_for_indexing_fields
1603
+
1604
+ new_dtype = np.dtype(
1605
+ {"names": list(new_dtype_dict.keys()), "formats": list(new_dtype_dict.values())}
1606
+ )
1607
+ return node_ndarray.astype(new_dtype, casting="same_kind")
1608
+
1609
+
1610
+ def reduce_predictor_with_different_bitness(predictor):
1611
+ cls, args, state = predictor.__reduce__()
1612
+
1613
+ new_state = state.copy()
1614
+ new_state["nodes"] = get_different_bitness_node_ndarray(new_state["nodes"])
1615
+
1616
+ return (cls, args, new_state)
1617
+
1618
+
1619
+ def test_different_bitness_pickle():
1620
+ X, y = make_classification(random_state=0)
1621
+
1622
+ clf = HistGradientBoostingClassifier(random_state=0, max_depth=3)
1623
+ clf.fit(X, y)
1624
+ score = clf.score(X, y)
1625
+
1626
+ def pickle_dump_with_different_bitness():
1627
+ f = io.BytesIO()
1628
+ p = pickle.Pickler(f)
1629
+ p.dispatch_table = copyreg.dispatch_table.copy()
1630
+ p.dispatch_table[TreePredictor] = reduce_predictor_with_different_bitness
1631
+
1632
+ p.dump(clf)
1633
+ f.seek(0)
1634
+ return f
1635
+
1636
+ # Simulate loading a pickle of the same model trained on a platform with different
1637
+ # bitness than the platform it will be used to make predictions on:
1638
+ new_clf = pickle.load(pickle_dump_with_different_bitness())
1639
+ new_score = new_clf.score(X, y)
1640
+ assert score == pytest.approx(new_score)
1641
+
1642
+
1643
+ def test_different_bitness_joblib_pickle():
1644
+ # Make sure that a platform specific pickle generated on a 64 bit
1645
+ # platform can be converted at pickle load time into an estimator
1646
+ # with Cython code that works with the host's native integer precision
1647
+ # to index nodes in the tree data structure when the host is a 32 bit
1648
+ # platform (and vice versa).
1649
+ #
1650
+ # This is in particular useful to be able to train a model on a 64 bit Linux
1651
+ # server and deploy the model as part of a (32 bit) WASM in-browser
1652
+ # application using pyodide.
1653
+ X, y = make_classification(random_state=0)
1654
+
1655
+ clf = HistGradientBoostingClassifier(random_state=0, max_depth=3)
1656
+ clf.fit(X, y)
1657
+ score = clf.score(X, y)
1658
+
1659
+ def joblib_dump_with_different_bitness():
1660
+ f = io.BytesIO()
1661
+ p = NumpyPickler(f)
1662
+ p.dispatch_table = copyreg.dispatch_table.copy()
1663
+ p.dispatch_table[TreePredictor] = reduce_predictor_with_different_bitness
1664
+
1665
+ p.dump(clf)
1666
+ f.seek(0)
1667
+ return f
1668
+
1669
+ new_clf = joblib.load(joblib_dump_with_different_bitness())
1670
+ new_score = new_clf.score(X, y)
1671
+ assert score == pytest.approx(new_score)
1672
+
1673
+
1674
+ def test_pandas_nullable_dtype():
1675
+ # Non regression test for https://github.com/scikit-learn/scikit-learn/issues/28317
1676
+ pd = pytest.importorskip("pandas")
1677
+
1678
+ rng = np.random.default_rng(0)
1679
+ X = pd.DataFrame({"a": rng.integers(10, size=100)}).astype(pd.Int64Dtype())
1680
+ y = rng.integers(2, size=100)
1681
+
1682
+ clf = HistGradientBoostingClassifier()
1683
+ clf.fit(X, y)
venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/test_grower.py ADDED
@@ -0,0 +1,650 @@
1
+ import numpy as np
2
+ import pytest
3
+ from numpy.testing import assert_allclose, assert_array_equal
4
+ from pytest import approx
5
+
6
+ from sklearn.ensemble._hist_gradient_boosting.binning import _BinMapper
7
+ from sklearn.ensemble._hist_gradient_boosting.common import (
8
+ G_H_DTYPE,
9
+ X_BINNED_DTYPE,
10
+ X_BITSET_INNER_DTYPE,
11
+ X_DTYPE,
12
+ Y_DTYPE,
13
+ )
14
+ from sklearn.ensemble._hist_gradient_boosting.grower import TreeGrower
15
+ from sklearn.preprocessing import OneHotEncoder
16
+ from sklearn.utils._openmp_helpers import _openmp_effective_n_threads
17
+
18
+ n_threads = _openmp_effective_n_threads()
19
+
20
+
21
+ def _make_training_data(n_bins=256, constant_hessian=True):
22
+ rng = np.random.RandomState(42)
23
+ n_samples = 10000
24
+
25
+ # Generate some test data directly binned so as to test the grower code
26
+ # independently of the binning logic.
27
+ X_binned = rng.randint(0, n_bins - 1, size=(n_samples, 2), dtype=X_BINNED_DTYPE)
28
+ X_binned = np.asfortranarray(X_binned)
29
+
30
+ def true_decision_function(input_features):
31
+ """Ground truth decision function
32
+
33
+ This is a very simple yet asymmetric decision tree. Therefore the
34
+ grower code should have no trouble recovering the decision function
35
+ from 10000 training samples.
36
+ """
37
+ if input_features[0] <= n_bins // 2:
38
+ return -1
39
+ else:
40
+ return -1 if input_features[1] <= n_bins // 3 else 1
41
+
42
+ target = np.array([true_decision_function(x) for x in X_binned], dtype=Y_DTYPE)
43
+
44
+ # Assume a square loss applied to an initial model that always predicts 0
45
+ # (hardcoded for this test):
46
+ all_gradients = target.astype(G_H_DTYPE)
47
+ shape_hessians = 1 if constant_hessian else all_gradients.shape
48
+ all_hessians = np.ones(shape=shape_hessians, dtype=G_H_DTYPE)
49
+
50
+ return X_binned, all_gradients, all_hessians
51
+
52
+
53
+ def _check_children_consistency(parent, left, right):
54
+ # Make sure the samples are correctly dispatched from a parent to its
55
+ # children
56
+ assert parent.left_child is left
57
+ assert parent.right_child is right
58
+
59
+ # each sample from the parent is propagated to one of the two children
60
+ assert len(left.sample_indices) + len(right.sample_indices) == len(
61
+ parent.sample_indices
62
+ )
63
+
64
+ assert set(left.sample_indices).union(set(right.sample_indices)) == set(
65
+ parent.sample_indices
66
+ )
67
+
68
+ # samples are sent either to the left or the right node, never to both
69
+ assert set(left.sample_indices).intersection(set(right.sample_indices)) == set()
70
+
71
+
72
+ @pytest.mark.parametrize(
73
+ "n_bins, constant_hessian, stopping_param, shrinkage",
74
+ [
75
+ (11, True, "min_gain_to_split", 0.5),
76
+ (11, False, "min_gain_to_split", 1.0),
77
+ (11, True, "max_leaf_nodes", 1.0),
78
+ (11, False, "max_leaf_nodes", 0.1),
79
+ (42, True, "max_leaf_nodes", 0.01),
80
+ (42, False, "max_leaf_nodes", 1.0),
81
+ (256, True, "min_gain_to_split", 1.0),
82
+ (256, True, "max_leaf_nodes", 0.1),
83
+ ],
84
+ )
85
+ def test_grow_tree(n_bins, constant_hessian, stopping_param, shrinkage):
86
+ X_binned, all_gradients, all_hessians = _make_training_data(
87
+ n_bins=n_bins, constant_hessian=constant_hessian
88
+ )
89
+ n_samples = X_binned.shape[0]
90
+
91
+ if stopping_param == "max_leaf_nodes":
92
+ stopping_param = {"max_leaf_nodes": 3}
93
+ else:
94
+ stopping_param = {"min_gain_to_split": 0.01}
95
+
96
+ grower = TreeGrower(
97
+ X_binned,
98
+ all_gradients,
99
+ all_hessians,
100
+ n_bins=n_bins,
101
+ shrinkage=shrinkage,
102
+ min_samples_leaf=1,
103
+ **stopping_param,
104
+ )
105
+
106
+ # The root node is not yet split, but the best possible split has
107
+ # already been evaluated:
108
+ assert grower.root.left_child is None
109
+ assert grower.root.right_child is None
110
+
111
+ root_split = grower.root.split_info
112
+ assert root_split.feature_idx == 0
113
+ assert root_split.bin_idx == n_bins // 2
114
+ assert len(grower.splittable_nodes) == 1
115
+
116
+ # Calling split next applies the next split and computes the best split
117
+ # for each of the two newly introduced children nodes.
118
+ left_node, right_node = grower.split_next()
119
+
120
+ # All training samples have been split into the two nodes, approximately
121
+ # 50%/50%
122
+ _check_children_consistency(grower.root, left_node, right_node)
123
+ assert len(left_node.sample_indices) > 0.4 * n_samples
124
+ assert len(left_node.sample_indices) < 0.6 * n_samples
125
+
126
+ if grower.min_gain_to_split > 0:
127
+ # The left node is too pure: there is no gain to split it further.
128
+ assert left_node.split_info.gain < grower.min_gain_to_split
129
+ assert left_node in grower.finalized_leaves
130
+
131
+ # The right node can still be split further, this time on feature #1
132
+ split_info = right_node.split_info
133
+ assert split_info.gain > 1.0
134
+ assert split_info.feature_idx == 1
135
+ assert split_info.bin_idx == n_bins // 3
136
+ assert right_node.left_child is None
137
+ assert right_node.right_child is None
138
+
139
+ # The right split has not been applied yet. Let's do it now:
140
+ assert len(grower.splittable_nodes) == 1
141
+ right_left_node, right_right_node = grower.split_next()
142
+ _check_children_consistency(right_node, right_left_node, right_right_node)
143
+ assert len(right_left_node.sample_indices) > 0.1 * n_samples
144
+ assert len(right_left_node.sample_indices) < 0.2 * n_samples
145
+
146
+ assert len(right_right_node.sample_indices) > 0.2 * n_samples
147
+ assert len(right_right_node.sample_indices) < 0.4 * n_samples
148
+
149
+ # All the leaves are pure; it is not possible to split any further:
150
+ assert not grower.splittable_nodes
151
+
152
+ grower._apply_shrinkage()
153
+
154
+ # Check the values of the leaves:
155
+ assert grower.root.left_child.value == approx(shrinkage)
156
+ assert grower.root.right_child.left_child.value == approx(shrinkage)
157
+ assert grower.root.right_child.right_child.value == approx(-shrinkage, rel=1e-3)
158
+
159
+
160
+ def test_predictor_from_grower():
161
+ # Build a tree on the toy 3-leaf dataset to extract the predictor.
162
+ n_bins = 256
163
+ X_binned, all_gradients, all_hessians = _make_training_data(n_bins=n_bins)
164
+ grower = TreeGrower(
165
+ X_binned,
166
+ all_gradients,
167
+ all_hessians,
168
+ n_bins=n_bins,
169
+ shrinkage=1.0,
170
+ max_leaf_nodes=3,
171
+ min_samples_leaf=5,
172
+ )
173
+ grower.grow()
174
+ assert grower.n_nodes == 5 # (2 decision nodes + 3 leaves)
175
+
176
+ # Check that the node structure can be converted into a predictor
177
+ # object to perform predictions at scale
178
+ # We pass undefined binning_thresholds because we won't use predict anyway
179
+ predictor = grower.make_predictor(
180
+ binning_thresholds=np.zeros((X_binned.shape[1], n_bins))
181
+ )
182
+ assert predictor.nodes.shape[0] == 5
183
+ assert predictor.nodes["is_leaf"].sum() == 3
184
+
185
+ # Probe some predictions for each leaf of the tree
186
+ # each group of 3 samples corresponds to a condition in _make_training_data
187
+ input_data = np.array(
188
+ [
189
+ [0, 0],
190
+ [42, 99],
191
+ [128, 254],
192
+ [129, 0],
193
+ [129, 85],
194
+ [254, 85],
195
+ [129, 86],
196
+ [129, 254],
197
+ [242, 100],
198
+ ],
199
+ dtype=np.uint8,
200
+ )
201
+ missing_values_bin_idx = n_bins - 1
202
+ predictions = predictor.predict_binned(
203
+ input_data, missing_values_bin_idx, n_threads
204
+ )
205
+ expected_targets = [1, 1, 1, 1, 1, 1, -1, -1, -1]
206
+ assert np.allclose(predictions, expected_targets)
207
+
208
+ # Check that training set can be recovered exactly:
209
+ predictions = predictor.predict_binned(X_binned, missing_values_bin_idx, n_threads)
210
+ assert np.allclose(predictions, -all_gradients)
211
+
212
+
213
+ @pytest.mark.parametrize(
214
+ "n_samples, min_samples_leaf, n_bins, constant_hessian, noise",
215
+ [
216
+ (11, 10, 7, True, 0),
217
+ (13, 10, 42, False, 0),
218
+ (56, 10, 255, True, 0.1),
219
+ (101, 3, 7, True, 0),
220
+ (200, 42, 42, False, 0),
221
+ (300, 55, 255, True, 0.1),
222
+ (300, 301, 255, True, 0.1),
223
+ ],
224
+ )
225
+ def test_min_samples_leaf(n_samples, min_samples_leaf, n_bins, constant_hessian, noise):
226
+ rng = np.random.RandomState(seed=0)
227
+ # data = linear target, 3 features, 1 irrelevant.
228
+ X = rng.normal(size=(n_samples, 3))
229
+ y = X[:, 0] - X[:, 1]
230
+ if noise:
231
+ y_scale = y.std()
232
+ y += rng.normal(scale=noise, size=n_samples) * y_scale
233
+ mapper = _BinMapper(n_bins=n_bins)
234
+ X = mapper.fit_transform(X)
235
+
236
+ all_gradients = y.astype(G_H_DTYPE)
237
+ shape_hessian = 1 if constant_hessian else all_gradients.shape
238
+ all_hessians = np.ones(shape=shape_hessian, dtype=G_H_DTYPE)
239
+ grower = TreeGrower(
240
+ X,
241
+ all_gradients,
242
+ all_hessians,
243
+ n_bins=n_bins,
244
+ shrinkage=1.0,
245
+ min_samples_leaf=min_samples_leaf,
246
+ max_leaf_nodes=n_samples,
247
+ )
248
+ grower.grow()
249
+ predictor = grower.make_predictor(binning_thresholds=mapper.bin_thresholds_)
250
+
251
+ if n_samples >= min_samples_leaf:
252
+ for node in predictor.nodes:
253
+ if node["is_leaf"]:
254
+ assert node["count"] >= min_samples_leaf
255
+ else:
256
+ assert predictor.nodes.shape[0] == 1
257
+ assert predictor.nodes[0]["is_leaf"]
258
+ assert predictor.nodes[0]["count"] == n_samples
259
+
260
+
261
+ @pytest.mark.parametrize("n_samples, min_samples_leaf", [(99, 50), (100, 50)])
262
+ def test_min_samples_leaf_root(n_samples, min_samples_leaf):
263
+ # Make sure root node isn't split if n_samples is not at least twice
264
+ # min_samples_leaf
265
+ rng = np.random.RandomState(seed=0)
266
+
267
+ n_bins = 256
268
+
269
+ # data = linear target, 3 features, 1 irrelevant.
270
+ X = rng.normal(size=(n_samples, 3))
271
+ y = X[:, 0] - X[:, 1]
272
+ mapper = _BinMapper(n_bins=n_bins)
273
+ X = mapper.fit_transform(X)
274
+
275
+ all_gradients = y.astype(G_H_DTYPE)
276
+ all_hessians = np.ones(shape=1, dtype=G_H_DTYPE)
277
+ grower = TreeGrower(
278
+ X,
279
+ all_gradients,
280
+ all_hessians,
281
+ n_bins=n_bins,
282
+ shrinkage=1.0,
283
+ min_samples_leaf=min_samples_leaf,
284
+ max_leaf_nodes=n_samples,
285
+ )
286
+ grower.grow()
287
+ if n_samples >= min_samples_leaf * 2:
288
+ assert len(grower.finalized_leaves) >= 2
289
+ else:
290
+ assert len(grower.finalized_leaves) == 1
291
+
292
+
293
+ def assert_is_stump(grower):
294
+ # To assert that stumps are created when max_depth=1
295
+ for leaf in (grower.root.left_child, grower.root.right_child):
296
+ assert leaf.left_child is None
297
+ assert leaf.right_child is None
298
+
299
+
300
+ @pytest.mark.parametrize("max_depth", [1, 2, 3])
301
+ def test_max_depth(max_depth):
302
+ # Make sure max_depth parameter works as expected
303
+ rng = np.random.RandomState(seed=0)
304
+
305
+ n_bins = 256
306
+ n_samples = 1000
307
+
308
+ # data = linear target, 3 features, 1 irrelevant.
309
+ X = rng.normal(size=(n_samples, 3))
310
+ y = X[:, 0] - X[:, 1]
311
+ mapper = _BinMapper(n_bins=n_bins)
312
+ X = mapper.fit_transform(X)
313
+
314
+ all_gradients = y.astype(G_H_DTYPE)
315
+ all_hessians = np.ones(shape=1, dtype=G_H_DTYPE)
316
+ grower = TreeGrower(X, all_gradients, all_hessians, max_depth=max_depth)
317
+ grower.grow()
318
+
319
+ depth = max(leaf.depth for leaf in grower.finalized_leaves)
320
+ assert depth == max_depth
321
+
322
+ if max_depth == 1:
323
+ assert_is_stump(grower)
324
+
325
+
326
+ def test_input_validation():
327
+ X_binned, all_gradients, all_hessians = _make_training_data()
328
+
329
+ X_binned_float = X_binned.astype(np.float32)
330
+ with pytest.raises(NotImplementedError, match="X_binned must be of type uint8"):
331
+ TreeGrower(X_binned_float, all_gradients, all_hessians)
332
+
333
+ X_binned_C_array = np.ascontiguousarray(X_binned)
334
+ with pytest.raises(
335
+ ValueError, match="X_binned should be passed as Fortran contiguous array"
336
+ ):
337
+ TreeGrower(X_binned_C_array, all_gradients, all_hessians)
338
+
339
+
340
+ def test_init_parameters_validation():
341
+ X_binned, all_gradients, all_hessians = _make_training_data()
342
+ with pytest.raises(ValueError, match="min_gain_to_split=-1 must be positive"):
343
+ TreeGrower(X_binned, all_gradients, all_hessians, min_gain_to_split=-1)
344
+
345
+ with pytest.raises(ValueError, match="min_hessian_to_split=-1 must be positive"):
346
+ TreeGrower(X_binned, all_gradients, all_hessians, min_hessian_to_split=-1)
347
+
348
+
349
+ def test_missing_value_predict_only():
350
+ # Make sure that missing values are supported at predict time even if they
351
+ # were not encountered in the training data: the missing values are
352
+ # assigned to whichever child has the most samples.
353
+
354
+ rng = np.random.RandomState(0)
355
+ n_samples = 100
356
+ X_binned = rng.randint(0, 256, size=(n_samples, 1), dtype=np.uint8)
357
+ X_binned = np.asfortranarray(X_binned)
358
+
359
+ gradients = rng.normal(size=n_samples).astype(G_H_DTYPE)
360
+ hessians = np.ones(shape=1, dtype=G_H_DTYPE)
361
+
362
+ grower = TreeGrower(
363
+ X_binned, gradients, hessians, min_samples_leaf=5, has_missing_values=False
364
+ )
365
+ grower.grow()
366
+
367
+ # We pass undefined binning_thresholds because we won't use predict anyway
368
+ predictor = grower.make_predictor(
369
+ binning_thresholds=np.zeros((X_binned.shape[1], X_binned.max() + 1))
370
+ )
371
+
372
+ # go from root to a leaf, always following node with the most samples.
373
+ # That's the path nans are supposed to take
374
+ node = predictor.nodes[0]
375
+ while not node["is_leaf"]:
376
+ left = predictor.nodes[node["left"]]
377
+ right = predictor.nodes[node["right"]]
378
+ node = left if left["count"] > right["count"] else right
379
+
380
+ prediction_main_path = node["value"]
381
+
382
+ # now build X_test with only nans, and make sure all predictions are equal
383
+ # to prediction_main_path
384
+ all_nans = np.full(shape=(n_samples, 1), fill_value=np.nan)
385
+ known_cat_bitsets = np.zeros((0, 8), dtype=X_BITSET_INNER_DTYPE)
386
+ f_idx_map = np.zeros(0, dtype=np.uint32)
387
+
388
+ y_pred = predictor.predict(all_nans, known_cat_bitsets, f_idx_map, n_threads)
389
+ assert np.all(y_pred == prediction_main_path)
390
+
391
+
392
+ def test_split_on_nan_with_infinite_values():
393
+ # Make sure the split on nan situations are respected even when there are
394
+ # samples with +inf values (we set the threshold to +inf when we have a
395
+ # split on nan so this test makes sure this does not introduce edge-case
396
+ # bugs). We need to use the private API so that we can also test
397
+ # predict_binned().
398
+
399
+ X = np.array([0, 1, np.inf, np.nan, np.nan]).reshape(-1, 1)
400
+ # the gradient values will force a split on nan situation
401
+ gradients = np.array([0, 0, 0, 100, 100], dtype=G_H_DTYPE)
402
+ hessians = np.ones(shape=1, dtype=G_H_DTYPE)
403
+
404
+ bin_mapper = _BinMapper()
405
+ X_binned = bin_mapper.fit_transform(X)
406
+
407
+ n_bins_non_missing = 3
408
+ has_missing_values = True
409
+ grower = TreeGrower(
410
+ X_binned,
411
+ gradients,
412
+ hessians,
413
+ n_bins_non_missing=n_bins_non_missing,
414
+ has_missing_values=has_missing_values,
415
+ min_samples_leaf=1,
416
+ n_threads=n_threads,
417
+ )
418
+
419
+ grower.grow()
420
+
421
+ predictor = grower.make_predictor(binning_thresholds=bin_mapper.bin_thresholds_)
422
+
423
+ # sanity check: this was a split on nan
424
+ assert predictor.nodes[0]["num_threshold"] == np.inf
425
+ assert predictor.nodes[0]["bin_threshold"] == n_bins_non_missing - 1
426
+
427
+ known_cat_bitsets, f_idx_map = bin_mapper.make_known_categories_bitsets()
428
+
429
+ # Make sure in particular that the +inf sample is mapped to the left child
430
+ # Note that lightgbm "fails" here and will assign the inf sample to the
431
+ # right child, even though it's a "split on nan" situation.
432
+ predictions = predictor.predict(X, known_cat_bitsets, f_idx_map, n_threads)
433
+ predictions_binned = predictor.predict_binned(
434
+ X_binned,
435
+ missing_values_bin_idx=bin_mapper.missing_values_bin_idx_,
436
+ n_threads=n_threads,
437
+ )
438
+ np.testing.assert_allclose(predictions, -gradients)
439
+ np.testing.assert_allclose(predictions_binned, -gradients)
440
+
441
+
442
+ def test_grow_tree_categories():
443
+ # Check that the grower produces the right predictor tree when a split is
444
+ # categorical
445
+ X_binned = np.array([[0, 1] * 11 + [1]], dtype=X_BINNED_DTYPE).T
446
+ X_binned = np.asfortranarray(X_binned)
447
+
448
+ all_gradients = np.array([10, 1] * 11 + [1], dtype=G_H_DTYPE)
449
+ all_hessians = np.ones(1, dtype=G_H_DTYPE)
450
+ is_categorical = np.ones(1, dtype=np.uint8)
451
+
452
+ grower = TreeGrower(
453
+ X_binned,
454
+ all_gradients,
455
+ all_hessians,
456
+ n_bins=4,
457
+ shrinkage=1.0,
458
+ min_samples_leaf=1,
459
+ is_categorical=is_categorical,
460
+ n_threads=n_threads,
461
+ )
462
+ grower.grow()
463
+ assert grower.n_nodes == 3
464
+
465
+ categories = [np.array([4, 9], dtype=X_DTYPE)]
466
+ predictor = grower.make_predictor(binning_thresholds=categories)
467
+ root = predictor.nodes[0]
468
+ assert root["count"] == 23
469
+ assert root["depth"] == 0
470
+ assert root["is_categorical"]
471
+
472
+ left, right = predictor.nodes[root["left"]], predictor.nodes[root["right"]]
473
+
474
+ # arbitrary validation, but this means the samples with binned category 1 go to the left.
475
+ assert left["count"] >= right["count"]
476
+
477
+ # check binned category value (1)
478
+ expected_binned_cat_bitset = [2**1] + [0] * 7
479
+ binned_cat_bitset = predictor.binned_left_cat_bitsets
480
+ assert_array_equal(binned_cat_bitset[0], expected_binned_cat_bitset)
481
+
482
+ # check raw category value (9)
483
+ expected_raw_cat_bitsets = [2**9] + [0] * 7
484
+ raw_cat_bitsets = predictor.raw_left_cat_bitsets
485
+ assert_array_equal(raw_cat_bitsets[0], expected_raw_cat_bitsets)
486
+
487
+ # Note that since there was no missing values during training, the missing
488
+ # values aren't part of the bitsets. However, we expect the missing values
489
+ # to go to the biggest child (i.e. the left one).
490
+ # The left child has a value of -1 = negative gradient.
491
+ assert root["missing_go_to_left"]
492
+
493
+ # make sure binned missing values are mapped to the left child during
494
+ # prediction
495
+ prediction_binned = predictor.predict_binned(
496
+ np.asarray([[6]]).astype(X_BINNED_DTYPE),
497
+ missing_values_bin_idx=6,
498
+ n_threads=n_threads,
499
+ )
500
+ assert_allclose(prediction_binned, [-1]) # negative gradient
501
+
502
+ # make sure raw missing values are mapped to the left child during
503
+ # prediction
504
+ known_cat_bitsets = np.zeros((1, 8), dtype=np.uint32) # ignored anyway
505
+ f_idx_map = np.array([0], dtype=np.uint32)
506
+ prediction = predictor.predict(
507
+ np.array([[np.nan]]), known_cat_bitsets, f_idx_map, n_threads
508
+ )
509
+ assert_allclose(prediction, [-1])
510
+
511
+
512
+ @pytest.mark.parametrize("min_samples_leaf", (1, 20))
513
+ @pytest.mark.parametrize("n_unique_categories", (2, 10, 100))
514
+ @pytest.mark.parametrize("target", ("binary", "random", "equal"))
515
+ def test_ohe_equivalence(min_samples_leaf, n_unique_categories, target):
516
+ # Make sure that native categorical splits are equivalent to using a OHE,
517
+ # when given enough depth
518
+
519
+ rng = np.random.RandomState(0)
520
+ n_samples = 10_000
521
+ X_binned = rng.randint(0, n_unique_categories, size=(n_samples, 1), dtype=np.uint8)
522
+
523
+ X_ohe = OneHotEncoder(sparse_output=False).fit_transform(X_binned)
524
+ X_ohe = np.asfortranarray(X_ohe).astype(np.uint8)
525
+
526
+ if target == "equal":
527
+ gradients = X_binned.reshape(-1)
528
+ elif target == "binary":
529
+ gradients = (X_binned % 2).reshape(-1)
530
+ else:
531
+ gradients = rng.randn(n_samples)
532
+ gradients = gradients.astype(G_H_DTYPE)
533
+
534
+ hessians = np.ones(shape=1, dtype=G_H_DTYPE)
535
+
536
+ grower_params = {
537
+ "min_samples_leaf": min_samples_leaf,
538
+ "max_depth": None,
539
+ "max_leaf_nodes": None,
540
+ }
541
+
542
+ grower = TreeGrower(
543
+ X_binned, gradients, hessians, is_categorical=[True], **grower_params
544
+ )
545
+ grower.grow()
546
+ # we pass undefined bin_thresholds because we won't use predict()
547
+ predictor = grower.make_predictor(
548
+ binning_thresholds=np.zeros((1, n_unique_categories))
549
+ )
550
+ preds = predictor.predict_binned(
551
+ X_binned, missing_values_bin_idx=255, n_threads=n_threads
552
+ )
553
+
554
+ grower_ohe = TreeGrower(X_ohe, gradients, hessians, **grower_params)
555
+ grower_ohe.grow()
556
+ predictor_ohe = grower_ohe.make_predictor(
557
+ binning_thresholds=np.zeros((X_ohe.shape[1], n_unique_categories))
558
+ )
559
+ preds_ohe = predictor_ohe.predict_binned(
560
+ X_ohe, missing_values_bin_idx=255, n_threads=n_threads
561
+ )
562
+
563
+ assert predictor.get_max_depth() <= predictor_ohe.get_max_depth()
564
+ if target == "binary" and n_unique_categories > 2:
565
+ # OHE needs more splits to achieve the same predictions
566
+ assert predictor.get_max_depth() < predictor_ohe.get_max_depth()
567
+
568
+ np.testing.assert_allclose(preds, preds_ohe)
569
+
570
+
571
+ def test_grower_interaction_constraints():
572
+ """Check that grower respects interaction constraints."""
573
+ n_features = 6
574
+ interaction_cst = [{0, 1}, {1, 2}, {3, 4, 5}]
575
+ n_samples = 10
576
+ n_bins = 6
577
+ root_feature_splits = []
578
+
579
+ def get_all_children(node):
580
+ res = []
581
+ if node.is_leaf:
582
+ return res
583
+ for n in [node.left_child, node.right_child]:
584
+ res.append(n)
585
+ res.extend(get_all_children(n))
586
+ return res
587
+
588
+ for seed in range(20):
589
+ rng = np.random.RandomState(seed)
590
+
591
+ X_binned = rng.randint(
592
+ 0, n_bins - 1, size=(n_samples, n_features), dtype=X_BINNED_DTYPE
593
+ )
594
+ X_binned = np.asfortranarray(X_binned)
595
+ gradients = rng.normal(size=n_samples).astype(G_H_DTYPE)
596
+ hessians = np.ones(shape=1, dtype=G_H_DTYPE)
597
+
598
+ grower = TreeGrower(
599
+ X_binned,
600
+ gradients,
601
+ hessians,
602
+ n_bins=n_bins,
603
+ min_samples_leaf=1,
604
+ interaction_cst=interaction_cst,
605
+ n_threads=n_threads,
606
+ )
607
+ grower.grow()
608
+
609
+ root_feature_idx = grower.root.split_info.feature_idx
610
+ root_feature_splits.append(root_feature_idx)
611
+
612
+ feature_idx_to_constraint_set = {
613
+ 0: {0, 1},
614
+ 1: {0, 1, 2},
615
+ 2: {1, 2},
616
+ 3: {3, 4, 5},
617
+ 4: {3, 4, 5},
618
+ 5: {3, 4, 5},
619
+ }
620
+
621
+ root_constraint_set = feature_idx_to_constraint_set[root_feature_idx]
622
+ for node in (grower.root.left_child, grower.root.right_child):
623
+ # Root's children's allowed_features must be the root's constraints set.
624
+ assert_array_equal(node.allowed_features, list(root_constraint_set))
625
+ for node in get_all_children(grower.root):
626
+ if node.is_leaf:
627
+ continue
628
+ # Ensure that each node uses a subset of features of its parent node.
629
+ parent_interaction_cst_indices = set(node.interaction_cst_indices)
630
+ right_interactions_cst_indices = set(
631
+ node.right_child.interaction_cst_indices
632
+ )
633
+ left_interactions_cst_indices = set(node.left_child.interaction_cst_indices)
634
+
635
+ assert right_interactions_cst_indices.issubset(
636
+ parent_interaction_cst_indices
637
+ )
638
+ assert left_interactions_cst_indices.issubset(
639
+ parent_interaction_cst_indices
640
+ )
641
+ # The features used for split must have been present in the root's
642
+ # constraint set.
643
+ assert node.split_info.feature_idx in root_constraint_set
644
+
645
+ # Make sure that every feature is used at least once as split for the root node.
646
+ assert (
647
+ len(set(root_feature_splits))
648
+ == len(set().union(*interaction_cst))
649
+ == n_features
650
+ )
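The grower tests above repeatedly rely on the closed-form leaf value (see also the later remark in test_bounded_value_min_gain_to_split that the unbounded value equals -sum_gradients / sum_hessians). A rough sketch of that formula, ignoring monotonic-constraint clipping and minor numerical details:

    import numpy as np

    # Per-sample gradients and hessians of some loss at the current predictions
    # (a squared loss has unit hessians, matching the constant-hessian case above).
    gradients = np.array([-1.0, -1.0, -1.0, 1.0])
    hessians = np.ones_like(gradients)
    l2_regularization = 0.0
    shrinkage = 0.5

    # Raw leaf value, then the scaling performed by _apply_shrinkage().
    raw_value = -gradients.sum() / (hessians.sum() + l2_regularization)
    leaf_value = shrinkage * raw_value
    print(raw_value, leaf_value)  # 0.5 0.25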
venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/test_histogram.py ADDED
@@ -0,0 +1,239 @@
1
+ import numpy as np
2
+ import pytest
3
+ from numpy.testing import assert_allclose, assert_array_equal
4
+
5
+ from sklearn.ensemble._hist_gradient_boosting.common import (
6
+ G_H_DTYPE,
7
+ HISTOGRAM_DTYPE,
8
+ X_BINNED_DTYPE,
9
+ )
10
+ from sklearn.ensemble._hist_gradient_boosting.histogram import (
11
+ _build_histogram,
12
+ _build_histogram_naive,
13
+ _build_histogram_no_hessian,
14
+ _build_histogram_root,
15
+ _build_histogram_root_no_hessian,
16
+ _subtract_histograms,
17
+ )
18
+
19
+
20
+ @pytest.mark.parametrize("build_func", [_build_histogram_naive, _build_histogram])
21
+ def test_build_histogram(build_func):
22
+ binned_feature = np.array([0, 2, 0, 1, 2, 0, 2, 1], dtype=X_BINNED_DTYPE)
23
+
24
+ # Small sample_indices (below unrolling threshold)
25
+ ordered_gradients = np.array([0, 1, 3], dtype=G_H_DTYPE)
26
+ ordered_hessians = np.array([1, 1, 2], dtype=G_H_DTYPE)
27
+
28
+ sample_indices = np.array([0, 2, 3], dtype=np.uint32)
29
+ hist = np.zeros((1, 3), dtype=HISTOGRAM_DTYPE)
30
+ build_func(
31
+ 0, sample_indices, binned_feature, ordered_gradients, ordered_hessians, hist
32
+ )
33
+ hist = hist[0]
34
+ assert_array_equal(hist["count"], [2, 1, 0])
35
+ assert_allclose(hist["sum_gradients"], [1, 3, 0])
36
+ assert_allclose(hist["sum_hessians"], [2, 2, 0])
37
+
38
+ # Larger sample_indices (above unrolling threshold)
39
+ sample_indices = np.array([0, 2, 3, 6, 7], dtype=np.uint32)
40
+ ordered_gradients = np.array([0, 1, 3, 0, 1], dtype=G_H_DTYPE)
41
+ ordered_hessians = np.array([1, 1, 2, 1, 0], dtype=G_H_DTYPE)
42
+
43
+ hist = np.zeros((1, 3), dtype=HISTOGRAM_DTYPE)
44
+ build_func(
45
+ 0, sample_indices, binned_feature, ordered_gradients, ordered_hessians, hist
46
+ )
47
+ hist = hist[0]
48
+ assert_array_equal(hist["count"], [2, 2, 1])
49
+ assert_allclose(hist["sum_gradients"], [1, 4, 0])
50
+ assert_allclose(hist["sum_hessians"], [2, 2, 1])
51
+
52
+
53
+ def test_histogram_sample_order_independence():
54
+ # Make sure the order of the samples has no impact on the histogram
55
+ # computations
56
+ rng = np.random.RandomState(42)
57
+ n_sub_samples = 100
58
+ n_samples = 1000
59
+ n_bins = 256
60
+
61
+ binned_feature = rng.randint(0, n_bins - 1, size=n_samples, dtype=X_BINNED_DTYPE)
62
+ sample_indices = rng.choice(
63
+ np.arange(n_samples, dtype=np.uint32), n_sub_samples, replace=False
64
+ )
65
+ ordered_gradients = rng.randn(n_sub_samples).astype(G_H_DTYPE)
66
+ hist_gc = np.zeros((1, n_bins), dtype=HISTOGRAM_DTYPE)
67
+ _build_histogram_no_hessian(
68
+ 0, sample_indices, binned_feature, ordered_gradients, hist_gc
69
+ )
70
+
71
+ ordered_hessians = rng.exponential(size=n_sub_samples).astype(G_H_DTYPE)
72
+ hist_ghc = np.zeros((1, n_bins), dtype=HISTOGRAM_DTYPE)
73
+ _build_histogram(
74
+ 0, sample_indices, binned_feature, ordered_gradients, ordered_hessians, hist_ghc
75
+ )
76
+
77
+ permutation = rng.permutation(n_sub_samples)
78
+ hist_gc_perm = np.zeros((1, n_bins), dtype=HISTOGRAM_DTYPE)
79
+ _build_histogram_no_hessian(
80
+ 0,
81
+ sample_indices[permutation],
82
+ binned_feature,
83
+ ordered_gradients[permutation],
84
+ hist_gc_perm,
85
+ )
86
+
87
+ hist_ghc_perm = np.zeros((1, n_bins), dtype=HISTOGRAM_DTYPE)
88
+ _build_histogram(
89
+ 0,
90
+ sample_indices[permutation],
91
+ binned_feature,
92
+ ordered_gradients[permutation],
93
+ ordered_hessians[permutation],
94
+ hist_ghc_perm,
95
+ )
96
+
97
+ hist_gc = hist_gc[0]
98
+ hist_ghc = hist_ghc[0]
99
+ hist_gc_perm = hist_gc_perm[0]
100
+ hist_ghc_perm = hist_ghc_perm[0]
101
+
102
+ assert_allclose(hist_gc["sum_gradients"], hist_gc_perm["sum_gradients"])
103
+ assert_array_equal(hist_gc["count"], hist_gc_perm["count"])
104
+
105
+ assert_allclose(hist_ghc["sum_gradients"], hist_ghc_perm["sum_gradients"])
106
+ assert_allclose(hist_ghc["sum_hessians"], hist_ghc_perm["sum_hessians"])
107
+ assert_array_equal(hist_ghc["count"], hist_ghc_perm["count"])
108
+
109
+
110
+ @pytest.mark.parametrize("constant_hessian", [True, False])
111
+ def test_unrolled_equivalent_to_naive(constant_hessian):
112
+ # Make sure the different unrolled histogram computations give the same
113
+ # results as the naive one.
114
+ rng = np.random.RandomState(42)
115
+ n_samples = 10
116
+ n_bins = 5
117
+ sample_indices = np.arange(n_samples).astype(np.uint32)
118
+ binned_feature = rng.randint(0, n_bins - 1, size=n_samples, dtype=np.uint8)
119
+ ordered_gradients = rng.randn(n_samples).astype(G_H_DTYPE)
120
+ if constant_hessian:
121
+ ordered_hessians = np.ones(n_samples, dtype=G_H_DTYPE)
122
+ else:
123
+ ordered_hessians = rng.lognormal(size=n_samples).astype(G_H_DTYPE)
124
+
125
+ hist_gc_root = np.zeros((1, n_bins), dtype=HISTOGRAM_DTYPE)
126
+ hist_ghc_root = np.zeros((1, n_bins), dtype=HISTOGRAM_DTYPE)
127
+ hist_gc = np.zeros((1, n_bins), dtype=HISTOGRAM_DTYPE)
128
+ hist_ghc = np.zeros((1, n_bins), dtype=HISTOGRAM_DTYPE)
129
+ hist_naive = np.zeros((1, n_bins), dtype=HISTOGRAM_DTYPE)
130
+
131
+ _build_histogram_root_no_hessian(0, binned_feature, ordered_gradients, hist_gc_root)
132
+ _build_histogram_root(
133
+ 0, binned_feature, ordered_gradients, ordered_hessians, hist_ghc_root
134
+ )
135
+ _build_histogram_no_hessian(
136
+ 0, sample_indices, binned_feature, ordered_gradients, hist_gc
137
+ )
138
+ _build_histogram(
139
+ 0, sample_indices, binned_feature, ordered_gradients, ordered_hessians, hist_ghc
140
+ )
141
+ _build_histogram_naive(
142
+ 0,
143
+ sample_indices,
144
+ binned_feature,
145
+ ordered_gradients,
146
+ ordered_hessians,
147
+ hist_naive,
148
+ )
149
+
150
+ hist_naive = hist_naive[0]
151
+ hist_gc_root = hist_gc_root[0]
152
+ hist_ghc_root = hist_ghc_root[0]
153
+ hist_gc = hist_gc[0]
154
+ hist_ghc = hist_ghc[0]
155
+ for hist in (hist_gc_root, hist_ghc_root, hist_gc, hist_ghc):
156
+ assert_array_equal(hist["count"], hist_naive["count"])
157
+ assert_allclose(hist["sum_gradients"], hist_naive["sum_gradients"])
158
+ for hist in (hist_ghc_root, hist_ghc):
159
+ assert_allclose(hist["sum_hessians"], hist_naive["sum_hessians"])
160
+ for hist in (hist_gc_root, hist_gc):
161
+ assert_array_equal(hist["sum_hessians"], np.zeros(n_bins))
162
+
163
+
164
+ @pytest.mark.parametrize("constant_hessian", [True, False])
165
+ def test_hist_subtraction(constant_hessian):
166
+ # Make sure the histogram subtraction trick gives the same result as the
167
+ # classical method.
168
+ rng = np.random.RandomState(42)
169
+ n_samples = 10
170
+ n_bins = 5
171
+ sample_indices = np.arange(n_samples).astype(np.uint32)
172
+ binned_feature = rng.randint(0, n_bins - 1, size=n_samples, dtype=np.uint8)
173
+ ordered_gradients = rng.randn(n_samples).astype(G_H_DTYPE)
174
+ if constant_hessian:
175
+ ordered_hessians = np.ones(n_samples, dtype=G_H_DTYPE)
176
+ else:
177
+ ordered_hessians = rng.lognormal(size=n_samples).astype(G_H_DTYPE)
178
+
179
+ hist_parent = np.zeros((1, n_bins), dtype=HISTOGRAM_DTYPE)
180
+ if constant_hessian:
181
+ _build_histogram_no_hessian(
182
+ 0, sample_indices, binned_feature, ordered_gradients, hist_parent
183
+ )
184
+ else:
185
+ _build_histogram(
186
+ 0,
187
+ sample_indices,
188
+ binned_feature,
189
+ ordered_gradients,
190
+ ordered_hessians,
191
+ hist_parent,
192
+ )
193
+
194
+ mask = rng.randint(0, 2, n_samples).astype(bool)
195
+
196
+ sample_indices_left = sample_indices[mask]
197
+ ordered_gradients_left = ordered_gradients[mask]
198
+ ordered_hessians_left = ordered_hessians[mask]
199
+ hist_left = np.zeros((1, n_bins), dtype=HISTOGRAM_DTYPE)
200
+ if constant_hessian:
201
+ _build_histogram_no_hessian(
202
+ 0, sample_indices_left, binned_feature, ordered_gradients_left, hist_left
203
+ )
204
+ else:
205
+ _build_histogram(
206
+ 0,
207
+ sample_indices_left,
208
+ binned_feature,
209
+ ordered_gradients_left,
210
+ ordered_hessians_left,
211
+ hist_left,
212
+ )
213
+
214
+ sample_indices_right = sample_indices[~mask]
215
+ ordered_gradients_right = ordered_gradients[~mask]
216
+ ordered_hessians_right = ordered_hessians[~mask]
217
+ hist_right = np.zeros((1, n_bins), dtype=HISTOGRAM_DTYPE)
218
+ if constant_hessian:
219
+ _build_histogram_no_hessian(
220
+ 0, sample_indices_right, binned_feature, ordered_gradients_right, hist_right
221
+ )
222
+ else:
223
+ _build_histogram(
224
+ 0,
225
+ sample_indices_right,
226
+ binned_feature,
227
+ ordered_gradients_right,
228
+ ordered_hessians_right,
229
+ hist_right,
230
+ )
231
+
232
+ hist_left_sub = np.copy(hist_parent)
233
+ hist_right_sub = np.copy(hist_parent)
234
+ _subtract_histograms(0, n_bins, hist_left_sub, hist_right)
235
+ _subtract_histograms(0, n_bins, hist_right_sub, hist_left)
236
+
237
+ for key in ("count", "sum_hessians", "sum_gradients"):
238
+ assert_allclose(hist_left[key], hist_left_sub[key], rtol=1e-6)
239
+ assert_allclose(hist_right[key], hist_right_sub[key], rtol=1e-6)
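test_hist_subtraction above checks the classic trick that avoids building both child histograms: the sibling histogram is the parent histogram minus the other child's, field by field. A pure-NumPy sketch of the same idea (the field names mirror HISTOGRAM_DTYPE; the real kernels are the Cython _build_histogram* and _subtract_histograms helpers used above):

    import numpy as np

    HIST = np.dtype([("sum_gradients", "f8"), ("sum_hessians", "f8"), ("count", "u8")])

    def build(bins, grads, hess, n_bins):
        # Naive histogram accumulation, one bin per binned feature value.
        hist = np.zeros(n_bins, dtype=HIST)
        for b, g, h in zip(bins, grads, hess):
            hist["sum_gradients"][b] += g
            hist["sum_hessians"][b] += h
            hist["count"][b] += 1
        return hist

    rng = np.random.RandomState(0)
    n_bins, n_samples = 5, 50
    bins = rng.randint(0, n_bins, size=n_samples)
    grads = rng.randn(n_samples)
    hess = rng.rand(n_samples) + 0.5
    goes_left = rng.randint(0, 2, n_samples).astype(bool)

    parent = build(bins, grads, hess, n_bins)
    left = build(bins[goes_left], grads[goes_left], hess[goes_left], n_bins)

    # Subtraction trick: right = parent - left, without touching the right samples.
    right = parent.copy()
    for field in ("sum_gradients", "sum_hessians", "count"):
        right[field] -= left[field]

    expected = build(bins[~goes_left], grads[~goes_left], hess[~goes_left], n_bins)
    assert np.allclose(right["sum_gradients"], expected["sum_gradients"])
    assert np.array_equal(right["count"], expected["count"])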
venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/test_monotonic_contraints.py ADDED
@@ -0,0 +1,435 @@
1
+ import re
2
+
3
+ import numpy as np
4
+ import pytest
5
+
6
+ from sklearn.ensemble import (
7
+ HistGradientBoostingClassifier,
8
+ HistGradientBoostingRegressor,
9
+ )
10
+ from sklearn.ensemble._hist_gradient_boosting.common import (
11
+ G_H_DTYPE,
12
+ X_BINNED_DTYPE,
13
+ MonotonicConstraint,
14
+ )
15
+ from sklearn.ensemble._hist_gradient_boosting.grower import TreeGrower
16
+ from sklearn.ensemble._hist_gradient_boosting.histogram import HistogramBuilder
17
+ from sklearn.ensemble._hist_gradient_boosting.splitting import (
18
+ Splitter,
19
+ compute_node_value,
20
+ )
21
+ from sklearn.utils._openmp_helpers import _openmp_effective_n_threads
22
+ from sklearn.utils._testing import _convert_container
23
+
24
+ n_threads = _openmp_effective_n_threads()
25
+
26
+
27
+ def is_increasing(a):
28
+ return (np.diff(a) >= 0.0).all()
29
+
30
+
31
+ def is_decreasing(a):
32
+ return (np.diff(a) <= 0.0).all()
33
+
34
+
35
+ def assert_leaves_values_monotonic(predictor, monotonic_cst):
36
+ # make sure leaves values (from left to right) are either all increasing
37
+ # or all decreasing (or neither) depending on the monotonic constraint.
38
+ nodes = predictor.nodes
39
+
40
+ def get_leaves_values():
41
+ """get leaves values from left to right"""
42
+ values = []
43
+
44
+ def depth_first_collect_leaf_values(node_idx):
45
+ node = nodes[node_idx]
46
+ if node["is_leaf"]:
47
+ values.append(node["value"])
48
+ return
49
+ depth_first_collect_leaf_values(node["left"])
50
+ depth_first_collect_leaf_values(node["right"])
51
+
52
+ depth_first_collect_leaf_values(0) # start at root (0)
53
+ return values
54
+
55
+ values = get_leaves_values()
56
+
57
+ if monotonic_cst == MonotonicConstraint.NO_CST:
58
+ # some increasing, some decreasing
59
+ assert not is_increasing(values) and not is_decreasing(values)
60
+ elif monotonic_cst == MonotonicConstraint.POS:
61
+ # all increasing
62
+ assert is_increasing(values)
63
+ else: # NEG
64
+ # all decreasing
65
+ assert is_decreasing(values)
66
+
67
+
68
+ def assert_children_values_monotonic(predictor, monotonic_cst):
69
+ # Make sure siblings values respect the monotonic constraints. Left should
70
+ # be lower (resp greater) than right child if constraint is POS (resp.
71
+ # NEG).
72
+ # Note that this property alone isn't enough to ensure full monotonicity,
73
+ # since we also need to guarantee that all the descendants of the left
74
+ # child won't be greater (resp. lower) than the right child, or its
75
+ # descendants. That's why we need to bound the predicted values (this is
76
+ # tested in assert_children_values_bounded)
77
+ nodes = predictor.nodes
78
+ left_lower = []
79
+ left_greater = []
80
+ for node in nodes:
81
+ if node["is_leaf"]:
82
+ continue
83
+
84
+ left_idx = node["left"]
85
+ right_idx = node["right"]
86
+
87
+ if nodes[left_idx]["value"] < nodes[right_idx]["value"]:
88
+ left_lower.append(node)
89
+ elif nodes[left_idx]["value"] > nodes[right_idx]["value"]:
90
+ left_greater.append(node)
91
+
92
+ if monotonic_cst == MonotonicConstraint.NO_CST:
93
+ assert left_lower and left_greater
94
+ elif monotonic_cst == MonotonicConstraint.POS:
95
+ assert left_lower and not left_greater
96
+ else: # NEG
97
+ assert not left_lower and left_greater
98
+
99
+
100
+ def assert_children_values_bounded(grower, monotonic_cst):
101
+ # Make sure that the values of the children of a node are bounded by the
102
+ # middle value between that node and its sibling (if there is a monotonic
103
+ # constraint).
104
+ # As a bonus, we also check that the siblings values are properly ordered
105
+ # which is slightly redundant with assert_children_values_monotonic (but
106
+ # this check is done on the grower nodes whereas
107
+ # assert_children_values_monotonic is done on the predictor nodes)
108
+
109
+ if monotonic_cst == MonotonicConstraint.NO_CST:
110
+ return
111
+
112
+ def recursively_check_children_node_values(node, right_sibling=None):
113
+ if node.is_leaf:
114
+ return
115
+ if right_sibling is not None:
116
+ middle = (node.value + right_sibling.value) / 2
117
+ if monotonic_cst == MonotonicConstraint.POS:
118
+ assert node.left_child.value <= node.right_child.value <= middle
119
+ if not right_sibling.is_leaf:
120
+ assert (
121
+ middle
122
+ <= right_sibling.left_child.value
123
+ <= right_sibling.right_child.value
124
+ )
125
+ else: # NEG
126
+ assert node.left_child.value >= node.right_child.value >= middle
127
+ if not right_sibling.is_leaf:
128
+ assert (
129
+ middle
130
+ >= right_sibling.left_child.value
131
+ >= right_sibling.right_child.value
132
+ )
133
+
134
+ recursively_check_children_node_values(
135
+ node.left_child, right_sibling=node.right_child
136
+ )
137
+ recursively_check_children_node_values(node.right_child)
138
+
139
+ recursively_check_children_node_values(grower.root)
140
+
141
+
142
+ @pytest.mark.parametrize("seed", range(3))
143
+ @pytest.mark.parametrize(
144
+ "monotonic_cst",
145
+ (
146
+ MonotonicConstraint.NO_CST,
147
+ MonotonicConstraint.POS,
148
+ MonotonicConstraint.NEG,
149
+ ),
150
+ )
151
+ def test_nodes_values(monotonic_cst, seed):
152
+ # Build a single tree with only one feature, and make sure the nodes
153
+ # values respect the monotonic constraints.
154
+
155
+ # Considering the following tree with a monotonic POS constraint, we
156
+ # should have:
157
+ #
158
+ # root
159
+ # / \
160
+ # 5 10 # middle = 7.5
161
+ # / \ / \
162
+ # a b c d
163
+ #
164
+ # a <= b and c <= d (assert_children_values_monotonic)
165
+ # a, b <= middle <= c, d (assert_children_values_bounded)
166
+ # a <= b <= c <= d (assert_leaves_values_monotonic)
167
+ #
168
+ # The last one is a consequence of the others, but can't hurt to check
169
+
170
+ rng = np.random.RandomState(seed)
171
+ n_samples = 1000
172
+ n_features = 1
173
+ X_binned = rng.randint(0, 255, size=(n_samples, n_features), dtype=np.uint8)
174
+ X_binned = np.asfortranarray(X_binned)
175
+
176
+ gradients = rng.normal(size=n_samples).astype(G_H_DTYPE)
177
+ hessians = np.ones(shape=1, dtype=G_H_DTYPE)
178
+
179
+ grower = TreeGrower(
180
+ X_binned, gradients, hessians, monotonic_cst=[monotonic_cst], shrinkage=0.1
181
+ )
182
+ grower.grow()
183
+
184
+ # grow() will shrink the leaves values at the very end. For our comparison
185
+ # tests, we need to revert the shrinkage of the leaves, else we would
186
+ # compare the value of a leaf (shrunk) with a node (not shrunk) and the
187
+ # test would not be correct.
188
+ for leave in grower.finalized_leaves:
189
+ leave.value /= grower.shrinkage
190
+
191
+ # We pass undefined binning_thresholds because we won't use predict anyway
192
+ predictor = grower.make_predictor(
193
+ binning_thresholds=np.zeros((X_binned.shape[1], X_binned.max() + 1))
194
+ )
195
+
196
+ # The consistency of the bounds can only be checked on the tree grower
197
+ # as the node bounds are not copied into the predictor tree. The
198
+ # consistency checks on the values of node children and leaves can be
199
+ # done either on the grower tree or on the predictor tree. We only
200
+ # do those checks on the predictor tree as the latter is derived from
201
+ # the former.
202
+ assert_children_values_monotonic(predictor, monotonic_cst)
203
+ assert_children_values_bounded(grower, monotonic_cst)
204
+ assert_leaves_values_monotonic(predictor, monotonic_cst)
205
+
206
+
207
+ @pytest.mark.parametrize("use_feature_names", (True, False))
208
+ def test_predictions(global_random_seed, use_feature_names):
209
+ # Train a model with a POS constraint on the first feature and a NEG
210
+ # constraint on the second feature, and make sure the constraints are
211
+ # respected by checking the predictions.
212
+ # test adapted from lightgbm's test_monotone_constraint(), itself inspired
213
+ # by https://xgboost.readthedocs.io/en/latest/tutorials/monotonic.html
214
+
215
+ rng = np.random.RandomState(global_random_seed)
216
+
217
+ n_samples = 1000
218
+ f_0 = rng.rand(n_samples) # positive correlation with y
219
+ f_1 = rng.rand(n_samples) # negative correlation with y
220
+ X = np.c_[f_0, f_1]
221
+ columns_name = ["f_0", "f_1"]
222
+ constructor_name = "dataframe" if use_feature_names else "array"
223
+ X = _convert_container(X, constructor_name, columns_name=columns_name)
224
+
225
+ noise = rng.normal(loc=0.0, scale=0.01, size=n_samples)
226
+ y = 5 * f_0 + np.sin(10 * np.pi * f_0) - 5 * f_1 - np.cos(10 * np.pi * f_1) + noise
227
+
228
+ if use_feature_names:
229
+ monotonic_cst = {"f_0": +1, "f_1": -1}
230
+ else:
231
+ monotonic_cst = [+1, -1]
232
+
233
+ gbdt = HistGradientBoostingRegressor(monotonic_cst=monotonic_cst)
234
+ gbdt.fit(X, y)
235
+
236
+ linspace = np.linspace(0, 1, 100)
237
+ sin = np.sin(linspace)
238
+ constant = np.full_like(linspace, fill_value=0.5)
239
+
240
+ # We now assert the predictions properly respect the constraints, on each
241
+ # feature. When testing for a feature we need to set the other one to a
242
+ # constant, because the monotonic constraints are only a "all else being
243
+ # equal" type of constraints:
244
+ # a constraint on the first feature only means that
245
+ # x0 < x0' => f(x0, x1) < f(x0', x1)
246
+ # while x1 stays constant.
247
+ # The constraint does not guarantee that
248
+ # x0 < x0' => f(x0, x1) < f(x0', x1')
249
+
250
+ # First feature (POS)
251
+ # assert pred is all increasing when f_0 is all increasing
252
+ X = np.c_[linspace, constant]
253
+ X = _convert_container(X, constructor_name, columns_name=columns_name)
254
+ pred = gbdt.predict(X)
255
+ assert is_increasing(pred)
256
+ # assert pred actually follows the variations of f_0
257
+ X = np.c_[sin, constant]
258
+ X = _convert_container(X, constructor_name, columns_name=columns_name)
259
+ pred = gbdt.predict(X)
260
+ assert np.all((np.diff(pred) >= 0) == (np.diff(sin) >= 0))
261
+
262
+ # Second feature (NEG)
263
+ # assert pred is all decreasing when f_1 is all increasing
264
+ X = np.c_[constant, linspace]
265
+ X = _convert_container(X, constructor_name, columns_name=columns_name)
266
+ pred = gbdt.predict(X)
267
+ assert is_decreasing(pred)
268
+ # assert pred actually follows the inverse variations of f_1
269
+ X = np.c_[constant, sin]
270
+ X = _convert_container(X, constructor_name, columns_name=columns_name)
271
+ pred = gbdt.predict(X)
272
+ assert ((np.diff(pred) <= 0) == (np.diff(sin) >= 0)).all()
273
+
274
+
275
+ def test_input_error():
276
+ X = [[1, 2], [2, 3], [3, 4]]
277
+ y = [0, 1, 2]
278
+
279
+ gbdt = HistGradientBoostingRegressor(monotonic_cst=[1, 0, -1])
280
+ with pytest.raises(
281
+ ValueError, match=re.escape("monotonic_cst has shape (3,) but the input data")
282
+ ):
283
+ gbdt.fit(X, y)
284
+
285
+ for monotonic_cst in ([1, 3], [1, -3], [0.3, -0.7]):
286
+ gbdt = HistGradientBoostingRegressor(monotonic_cst=monotonic_cst)
287
+ expected_msg = re.escape(
288
+ "must be an array-like of -1, 0 or 1. Observed values:"
289
+ )
290
+ with pytest.raises(ValueError, match=expected_msg):
291
+ gbdt.fit(X, y)
292
+
293
+ gbdt = HistGradientBoostingClassifier(monotonic_cst=[0, 1])
294
+ with pytest.raises(
295
+ ValueError,
296
+ match="monotonic constraints are not supported for multiclass classification",
297
+ ):
298
+ gbdt.fit(X, y)
299
+
300
+
301
+ def test_input_error_related_to_feature_names():
302
+ pd = pytest.importorskip("pandas")
303
+ X = pd.DataFrame({"a": [0, 1, 2], "b": [0, 1, 2]})
304
+ y = np.array([0, 1, 0])
305
+
306
+ monotonic_cst = {"d": 1, "a": 1, "c": -1}
307
+ gbdt = HistGradientBoostingRegressor(monotonic_cst=monotonic_cst)
308
+ expected_msg = re.escape(
309
+ "monotonic_cst contains 2 unexpected feature names: ['c', 'd']."
310
+ )
311
+ with pytest.raises(ValueError, match=expected_msg):
312
+ gbdt.fit(X, y)
313
+
314
+ monotonic_cst = {k: 1 for k in "abcdefghijklmnopqrstuvwxyz"}
315
+ gbdt = HistGradientBoostingRegressor(monotonic_cst=monotonic_cst)
316
+ expected_msg = re.escape(
317
+ "monotonic_cst contains 24 unexpected feature names: "
318
+ "['c', 'd', 'e', 'f', 'g', '...']."
319
+ )
320
+ with pytest.raises(ValueError, match=expected_msg):
321
+ gbdt.fit(X, y)
322
+
323
+ monotonic_cst = {"a": 1}
324
+ gbdt = HistGradientBoostingRegressor(monotonic_cst=monotonic_cst)
325
+ expected_msg = re.escape(
326
+ "HistGradientBoostingRegressor was not fitted on data with feature "
327
+ "names. Pass monotonic_cst as an integer array instead."
328
+ )
329
+ with pytest.raises(ValueError, match=expected_msg):
330
+ gbdt.fit(X.values, y)
331
+
332
+ monotonic_cst = {"b": -1, "a": "+"}
333
+ gbdt = HistGradientBoostingRegressor(monotonic_cst=monotonic_cst)
334
+ expected_msg = re.escape("monotonic_cst['a'] must be either -1, 0 or 1. Got '+'.")
335
+ with pytest.raises(ValueError, match=expected_msg):
336
+ gbdt.fit(X, y)
337
+
338
+
339
+ def test_bounded_value_min_gain_to_split():
340
+ # The purpose of this test is to show that when computing the gain at a
341
+ # given split, the value of the current node should be properly bounded to
342
+ # respect the monotonic constraints, because it strongly interacts with
343
+ # min_gain_to_split. We build a simple example where gradients are [1, 1,
344
+ # 100, 1, 1] (hessians are all ones). The best split happens on the 3rd
345
+ # bin, and depending on whether the value of the node is bounded or not,
346
+ # the min_gain_to_split constraint is or isn't satisfied.
347
+ l2_regularization = 0
348
+ min_hessian_to_split = 0
349
+ min_samples_leaf = 1
350
+ n_bins = n_samples = 5
351
+ X_binned = np.arange(n_samples).reshape(-1, 1).astype(X_BINNED_DTYPE)
352
+ sample_indices = np.arange(n_samples, dtype=np.uint32)
353
+ all_hessians = np.ones(n_samples, dtype=G_H_DTYPE)
354
+ all_gradients = np.array([1, 1, 100, 1, 1], dtype=G_H_DTYPE)
355
+ sum_gradients = all_gradients.sum()
356
+ sum_hessians = all_hessians.sum()
357
+ hessians_are_constant = False
358
+
359
+ builder = HistogramBuilder(
360
+ X_binned, n_bins, all_gradients, all_hessians, hessians_are_constant, n_threads
361
+ )
362
+ n_bins_non_missing = np.array([n_bins - 1] * X_binned.shape[1], dtype=np.uint32)
363
+ has_missing_values = np.array([False] * X_binned.shape[1], dtype=np.uint8)
364
+ monotonic_cst = np.array(
365
+ [MonotonicConstraint.NO_CST] * X_binned.shape[1], dtype=np.int8
366
+ )
367
+ is_categorical = np.zeros_like(monotonic_cst, dtype=np.uint8)
368
+ missing_values_bin_idx = n_bins - 1
369
+ children_lower_bound, children_upper_bound = -np.inf, np.inf
370
+
371
+ min_gain_to_split = 2000
372
+ splitter = Splitter(
373
+ X_binned,
374
+ n_bins_non_missing,
375
+ missing_values_bin_idx,
376
+ has_missing_values,
377
+ is_categorical,
378
+ monotonic_cst,
379
+ l2_regularization,
380
+ min_hessian_to_split,
381
+ min_samples_leaf,
382
+ min_gain_to_split,
383
+ hessians_are_constant,
384
+ )
385
+
386
+ histograms = builder.compute_histograms_brute(sample_indices)
387
+
388
+ # Since the gradient array is [1, 1, 100, 1, 1]
389
+ # the max possible gain happens on the 3rd bin (or equivalently in the 2nd)
390
+ # and is equal to about 1307, which is less than min_gain_to_split = 2000, so
391
+ # the node is considered unsplittable (gain = -1)
392
+ current_lower_bound, current_upper_bound = -np.inf, np.inf
393
+ value = compute_node_value(
394
+ sum_gradients,
395
+ sum_hessians,
396
+ current_lower_bound,
397
+ current_upper_bound,
398
+ l2_regularization,
399
+ )
400
+ # the unbounded value is equal to -sum_gradients / sum_hessians
401
+ assert value == pytest.approx(-104 / 5)
402
+ split_info = splitter.find_node_split(
403
+ n_samples,
404
+ histograms,
405
+ sum_gradients,
406
+ sum_hessians,
407
+ value,
408
+ lower_bound=children_lower_bound,
409
+ upper_bound=children_upper_bound,
410
+ )
411
+ assert split_info.gain == -1 # min_gain_to_split not respected
412
+
413
+ # here again the max possible gain is on the 3rd bin but we now cap the
414
+ # value of the node into [-10, inf].
415
+ # This means the gain is now about 2430 which is more than the
416
+ # min_gain_to_split constraint.
417
+ current_lower_bound, current_upper_bound = -10, np.inf
418
+ value = compute_node_value(
419
+ sum_gradients,
420
+ sum_hessians,
421
+ current_lower_bound,
422
+ current_upper_bound,
423
+ l2_regularization,
424
+ )
425
+ assert value == -10
426
+ split_info = splitter.find_node_split(
427
+ n_samples,
428
+ histograms,
429
+ sum_gradients,
430
+ sum_hessians,
431
+ value,
432
+ lower_bound=children_lower_bound,
433
+ upper_bound=children_upper_bound,
434
+ )
435
+ assert split_info.gain > min_gain_to_split
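test_predictions above verifies the constraint semantics through the public estimator. For reference, a short usage sketch of the same "all else being equal" guarantee (the feature values and target below are made up for illustration):

    import numpy as np
    from sklearn.ensemble import HistGradientBoostingRegressor

    rng = np.random.RandomState(0)
    X = rng.rand(1000, 2)
    y = 3 * X[:, 0] - 2 * X[:, 1] + rng.normal(scale=0.01, size=1000)

    # +1: non-decreasing in the first feature, -1: non-increasing in the second.
    gbdt = HistGradientBoostingRegressor(monotonic_cst=[1, -1]).fit(X, y)

    grid = np.linspace(0, 1, 50)
    X_probe = np.c_[grid, np.full_like(grid, 0.5)]  # vary f_0, hold f_1 constant
    assert np.all(np.diff(gbdt.predict(X_probe)) >= 0)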
venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/test_predictor.py ADDED
@@ -0,0 +1,187 @@
1
+ import numpy as np
2
+ import pytest
3
+ from numpy.testing import assert_allclose
4
+
5
+ from sklearn.datasets import make_regression
6
+ from sklearn.ensemble._hist_gradient_boosting._bitset import (
7
+ set_bitset_memoryview,
8
+ set_raw_bitset_from_binned_bitset,
9
+ )
10
+ from sklearn.ensemble._hist_gradient_boosting.binning import _BinMapper
11
+ from sklearn.ensemble._hist_gradient_boosting.common import (
12
+ ALMOST_INF,
13
+ G_H_DTYPE,
14
+ PREDICTOR_RECORD_DTYPE,
15
+ X_BINNED_DTYPE,
16
+ X_BITSET_INNER_DTYPE,
17
+ X_DTYPE,
18
+ )
19
+ from sklearn.ensemble._hist_gradient_boosting.grower import TreeGrower
20
+ from sklearn.ensemble._hist_gradient_boosting.predictor import TreePredictor
21
+ from sklearn.metrics import r2_score
22
+ from sklearn.model_selection import train_test_split
23
+ from sklearn.utils._openmp_helpers import _openmp_effective_n_threads
24
+
25
+ n_threads = _openmp_effective_n_threads()
26
+
27
+
28
+ @pytest.mark.parametrize("n_bins", [200, 256])
29
+ def test_regression_dataset(n_bins):
30
+ X, y = make_regression(
31
+ n_samples=500, n_features=10, n_informative=5, random_state=42
32
+ )
33
+ X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
34
+
35
+ mapper = _BinMapper(n_bins=n_bins, random_state=42)
36
+ X_train_binned = mapper.fit_transform(X_train)
37
+
38
+ # Init gradients and hessians to that of least squares loss
39
+ gradients = -y_train.astype(G_H_DTYPE)
40
+ hessians = np.ones(1, dtype=G_H_DTYPE)
41
+
42
+ min_samples_leaf = 10
43
+ max_leaf_nodes = 30
44
+ grower = TreeGrower(
45
+ X_train_binned,
46
+ gradients,
47
+ hessians,
48
+ min_samples_leaf=min_samples_leaf,
49
+ max_leaf_nodes=max_leaf_nodes,
50
+ n_bins=n_bins,
51
+ n_bins_non_missing=mapper.n_bins_non_missing_,
52
+ )
53
+ grower.grow()
54
+
55
+ predictor = grower.make_predictor(binning_thresholds=mapper.bin_thresholds_)
56
+
57
+ known_cat_bitsets = np.zeros((0, 8), dtype=X_BITSET_INNER_DTYPE)
58
+ f_idx_map = np.zeros(0, dtype=np.uint32)
59
+
60
+ y_pred_train = predictor.predict(X_train, known_cat_bitsets, f_idx_map, n_threads)
61
+ assert r2_score(y_train, y_pred_train) > 0.82
62
+
63
+ y_pred_test = predictor.predict(X_test, known_cat_bitsets, f_idx_map, n_threads)
64
+ assert r2_score(y_test, y_pred_test) > 0.67
65
+
66
+
67
+ @pytest.mark.parametrize(
68
+ "num_threshold, expected_predictions",
69
+ [
70
+ (-np.inf, [0, 1, 1, 1]),
71
+ (10, [0, 0, 1, 1]),
72
+ (20, [0, 0, 0, 1]),
73
+ (ALMOST_INF, [0, 0, 0, 1]),
74
+ (np.inf, [0, 0, 0, 0]),
75
+ ],
76
+ )
77
+ def test_infinite_values_and_thresholds(num_threshold, expected_predictions):
78
+ # Make sure infinite values and infinite thresholds are handled properly.
79
+ # In particular, if a value is +inf and the threshold is ALMOST_INF the
80
+ # sample should go to the right child. If the threshold is inf (split on
81
+ # nan), the +inf sample will go to the left child.
82
+
83
+ X = np.array([-np.inf, 10, 20, np.inf]).reshape(-1, 1)
84
+ nodes = np.zeros(3, dtype=PREDICTOR_RECORD_DTYPE)
85
+
86
+ # We just construct a simple tree with 1 root and 2 children
87
+ # parent node
88
+ nodes[0]["left"] = 1
89
+ nodes[0]["right"] = 2
90
+ nodes[0]["feature_idx"] = 0
91
+ nodes[0]["num_threshold"] = num_threshold
92
+
93
+ # left child
94
+ nodes[1]["is_leaf"] = True
95
+ nodes[1]["value"] = 0
96
+
97
+ # right child
98
+ nodes[2]["is_leaf"] = True
99
+ nodes[2]["value"] = 1
100
+
101
+ binned_cat_bitsets = np.zeros((0, 8), dtype=X_BITSET_INNER_DTYPE)
102
+ raw_categorical_bitsets = np.zeros((0, 8), dtype=X_BITSET_INNER_DTYPE)
103
+ known_cat_bitset = np.zeros((0, 8), dtype=X_BITSET_INNER_DTYPE)
104
+ f_idx_map = np.zeros(0, dtype=np.uint32)
105
+
106
+ predictor = TreePredictor(nodes, binned_cat_bitsets, raw_categorical_bitsets)
107
+ predictions = predictor.predict(X, known_cat_bitset, f_idx_map, n_threads)
108
+
109
+ assert np.all(predictions == expected_predictions)
110
+
111
+
112
+ @pytest.mark.parametrize(
113
+ "bins_go_left, expected_predictions",
114
+ [
115
+ ([0, 3, 4, 6], [1, 0, 0, 1, 1, 0]),
116
+ ([0, 1, 2, 6], [1, 1, 1, 0, 0, 0]),
117
+ ([3, 5, 6], [0, 0, 0, 1, 0, 1]),
118
+ ],
119
+ )
120
+ def test_categorical_predictor(bins_go_left, expected_predictions):
121
+ # Test predictor outputs are correct with categorical features
122
+
123
+ X_binned = np.array([[0, 1, 2, 3, 4, 5]], dtype=X_BINNED_DTYPE).T
124
+ categories = np.array([2, 5, 6, 8, 10, 15], dtype=X_DTYPE)
125
+
126
+ bins_go_left = np.array(bins_go_left, dtype=X_BINNED_DTYPE)
127
+
128
+ # We just construct a simple tree with 1 root and 2 children
129
+ # parent node
130
+ nodes = np.zeros(3, dtype=PREDICTOR_RECORD_DTYPE)
131
+ nodes[0]["left"] = 1
132
+ nodes[0]["right"] = 2
133
+ nodes[0]["feature_idx"] = 0
134
+ nodes[0]["is_categorical"] = True
135
+ nodes[0]["missing_go_to_left"] = True
136
+
137
+ # left child
138
+ nodes[1]["is_leaf"] = True
139
+ nodes[1]["value"] = 1
140
+
141
+ # right child
142
+ nodes[2]["is_leaf"] = True
143
+ nodes[2]["value"] = 0
144
+
145
+ binned_cat_bitsets = np.zeros((1, 8), dtype=X_BITSET_INNER_DTYPE)
146
+ raw_categorical_bitsets = np.zeros((1, 8), dtype=X_BITSET_INNER_DTYPE)
147
+ for go_left in bins_go_left:
148
+ set_bitset_memoryview(binned_cat_bitsets[0], go_left)
149
+
150
+ set_raw_bitset_from_binned_bitset(
151
+ raw_categorical_bitsets[0], binned_cat_bitsets[0], categories
152
+ )
153
+
154
+ predictor = TreePredictor(nodes, binned_cat_bitsets, raw_categorical_bitsets)
155
+
156
+ # Check binned data gives correct predictions
157
+ prediction_binned = predictor.predict_binned(
158
+ X_binned, missing_values_bin_idx=6, n_threads=n_threads
159
+ )
160
+ assert_allclose(prediction_binned, expected_predictions)
161
+
162
+ # manually construct bitset
163
+ known_cat_bitsets = np.zeros((1, 8), dtype=np.uint32)
164
+ known_cat_bitsets[0, 0] = np.sum(2**categories, dtype=np.uint32)
165
+ f_idx_map = np.array([0], dtype=np.uint32)
166
+
167
+ # Check with un-binned data
168
+ predictions = predictor.predict(
169
+ categories.reshape(-1, 1), known_cat_bitsets, f_idx_map, n_threads
170
+ )
171
+ assert_allclose(predictions, expected_predictions)
172
+
173
+ # Check missing goes left because missing_values_bin_idx=6
174
+ X_binned_missing = np.array([[6]], dtype=X_BINNED_DTYPE).T
175
+ predictions = predictor.predict_binned(
176
+ X_binned_missing, missing_values_bin_idx=6, n_threads=n_threads
177
+ )
178
+ assert_allclose(predictions, [1])
179
+
180
+ # missing and unknown go left
181
+ predictions = predictor.predict(
182
+ np.array([[np.nan, 17]], dtype=X_DTYPE).T,
183
+ known_cat_bitsets,
184
+ f_idx_map,
185
+ n_threads,
186
+ )
187
+ assert_allclose(predictions, [1, 1])
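The categorical predictor tests above manipulate left-child bitsets of eight uint32 words (256 bits). As the expected values in test_grow_tree_categories and test_categorical_predictor suggest, category c corresponds to bit c % 32 of word c // 32. A pure-Python illustration of that layout (the real helpers are the Cython functions such as set_bitset_memoryview imported above):

    import numpy as np

    def make_left_bitset(categories_go_left):
        # Set bit (c % 32) of word (c // 32) for each left-going category.
        bitset = np.zeros(8, dtype=np.uint32)
        for c in categories_go_left:
            bitset[c // 32] |= 1 << (c % 32)
        return bitset

    def goes_left(bitset, c):
        return bool((int(bitset[c // 32]) >> (c % 32)) & 1)

    bitset = make_left_bitset([2, 5, 6, 8, 10, 15])
    assert bitset[0] == sum(2**c for c in (2, 5, 6, 8, 10, 15))
    assert goes_left(bitset, 6) and not goes_left(bitset, 7)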