diff --git a/ckpts/universal/global_step40/zero/13.attention.query_key_value.weight/exp_avg.pt b/ckpts/universal/global_step40/zero/13.attention.query_key_value.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..b3ebcfe81439574e7c825ac0168ac65e3dd3034e --- /dev/null +++ b/ckpts/universal/global_step40/zero/13.attention.query_key_value.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f5f694f7b53a61f0d968ce9e518ae41e89a589a8c3c46b07302fce16bb4f2bc8 +size 50332828 diff --git a/ckpts/universal/global_step40/zero/13.attention.query_key_value.weight/fp32.pt b/ckpts/universal/global_step40/zero/13.attention.query_key_value.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..b24c4678525b43d682dab3c39889ec690b8ba622 --- /dev/null +++ b/ckpts/universal/global_step40/zero/13.attention.query_key_value.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4b744b93c965ec89d11d736c8cdd1af0c4ae4eb62f7915a1ce6fccba03c6853f +size 50332749 diff --git a/ckpts/universal/global_step40/zero/19.post_attention_layernorm.weight/exp_avg.pt b/ckpts/universal/global_step40/zero/19.post_attention_layernorm.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..7611dcd738dfecffa87b7c8d962b5c0510ae4286 --- /dev/null +++ b/ckpts/universal/global_step40/zero/19.post_attention_layernorm.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0d79d4989b5ad3458dd6c02e7e41619e8e519e2384e4a24b2b8f15483a869a33 +size 9372 diff --git a/ckpts/universal/global_step40/zero/19.post_attention_layernorm.weight/exp_avg_sq.pt b/ckpts/universal/global_step40/zero/19.post_attention_layernorm.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..4bfd24332d1b56fdf5a702c8beae5f35734c3008 --- /dev/null +++ b/ckpts/universal/global_step40/zero/19.post_attention_layernorm.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:19aac9fdbb249e7ab8da57ca7275753adf58d72f1d5292899fad981ff43e6a4c +size 9387 diff --git a/ckpts/universal/global_step40/zero/19.post_attention_layernorm.weight/fp32.pt b/ckpts/universal/global_step40/zero/19.post_attention_layernorm.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..50960347b4178ca1014775bd8b2533a9460dd3b0 --- /dev/null +++ b/ckpts/universal/global_step40/zero/19.post_attention_layernorm.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c75f03de837521794edd7528f417105fc4b6d81ab0862b294e899bcbb0996c2e +size 9293 diff --git a/ckpts/universal/global_step40/zero/23.attention.dense.weight/exp_avg_sq.pt b/ckpts/universal/global_step40/zero/23.attention.dense.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..a307cd9daa3b069f16c56d3da3010d0f3f7a7b7d --- /dev/null +++ b/ckpts/universal/global_step40/zero/23.attention.dense.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:82a32b5ef592e33ea72014b53b265d66a601c7412f9dcc5abb8aa7d30aff3132 +size 16778411 diff --git a/ckpts/universal/global_step40/zero/23.attention.dense.weight/fp32.pt b/ckpts/universal/global_step40/zero/23.attention.dense.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..0dc50e6a854c2e03678886dcf766d3ef4b365b57 --- /dev/null +++ 
b/ckpts/universal/global_step40/zero/23.attention.dense.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dfe9f1cd80247701be0ce8c75c38c50efdc796f882d2538cbb4c6930450055c0 +size 16778317 diff --git a/venv/lib/python3.10/site-packages/sklearn/ensemble/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/ensemble/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..15251e0b47e980641a911801e9ae510b7aad0055 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/ensemble/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/ensemble/__pycache__/_bagging.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/ensemble/__pycache__/_bagging.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d3578f1d1fc1381b7fa2a5c01e33c7695c57c40f Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/ensemble/__pycache__/_bagging.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/ensemble/__pycache__/_base.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/ensemble/__pycache__/_base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..541e6bd13dc067ae61a4bb25158c16e6b76cb014 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/ensemble/__pycache__/_base.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/ensemble/__pycache__/_forest.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/ensemble/__pycache__/_forest.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9f51473b78c8b0af06d238cf1f9293fae53070c6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/ensemble/__pycache__/_forest.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/ensemble/__pycache__/_gb.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/ensemble/__pycache__/_gb.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..51353def233203a97566cf4b0c5284b9c598059a Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/ensemble/__pycache__/_gb.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/ensemble/__pycache__/_iforest.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/ensemble/__pycache__/_iforest.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..42f16416bc64032752512e4b2f4c5e7dacfc0233 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/ensemble/__pycache__/_iforest.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/ensemble/__pycache__/_stacking.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/ensemble/__pycache__/_stacking.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..402b317b315a1887b6006a77be0efa3623e59bb5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/ensemble/__pycache__/_stacking.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/ensemble/__pycache__/_voting.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/ensemble/__pycache__/_voting.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4fb4907e5c174ee51846140b9cf06f4adae5730a Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/sklearn/ensemble/__pycache__/_voting.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/ensemble/__pycache__/_weight_boosting.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/ensemble/__pycache__/_weight_boosting.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4b29e128c840ec290220b40a1f44eb9fc5dc0f77 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/ensemble/__pycache__/_weight_boosting.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d4bbc8b74e8000f2394692155da9eff2ef9bdd39 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/__pycache__/binning.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/__pycache__/binning.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..56ba950631f9b1854566b10f4f9bcee5f8101638 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/__pycache__/binning.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/__pycache__/gradient_boosting.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/__pycache__/gradient_boosting.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6565948d2bcd676caa16c185383d82d3c83c90a0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/__pycache__/gradient_boosting.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/__pycache__/grower.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/__pycache__/grower.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..abd30752ef5c7a5cbb287033e753d8ce6f8ac95d Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/__pycache__/grower.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/__pycache__/predictor.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/__pycache__/predictor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a4b428061ed5424d88676a04624a7a879ba5d9fe Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/__pycache__/predictor.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/_bitset.pxd b/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/_bitset.pxd new file mode 100644 index 0000000000000000000000000000000000000000..343ffa1191b226ede2c1604761a189dd93417543 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/_bitset.pxd @@ -0,0 +1,18 @@ +from .common cimport X_BINNED_DTYPE_C +from .common cimport BITSET_DTYPE_C 
+from .common cimport BITSET_INNER_DTYPE_C +from .common cimport X_DTYPE_C + +cdef void init_bitset(BITSET_DTYPE_C bitset) noexcept nogil + +cdef void set_bitset(BITSET_DTYPE_C bitset, X_BINNED_DTYPE_C val) noexcept nogil + +cdef unsigned char in_bitset(BITSET_DTYPE_C bitset, X_BINNED_DTYPE_C val) noexcept nogil + +cpdef unsigned char in_bitset_memoryview(const BITSET_INNER_DTYPE_C[:] bitset, + X_BINNED_DTYPE_C val) noexcept nogil + +cdef unsigned char in_bitset_2d_memoryview( + const BITSET_INNER_DTYPE_C [:, :] bitset, + X_BINNED_DTYPE_C val, + unsigned int row) noexcept nogil diff --git a/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/_gradient_boosting.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/_gradient_boosting.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..88fe8581130bdc34f62303784988daf094d6c4bb Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/_gradient_boosting.cpython-310-x86_64-linux-gnu.so differ diff --git a/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/binning.py b/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/binning.py new file mode 100644 index 0000000000000000000000000000000000000000..3ab90aadcb6bb36f81d38e08d0303864acaae4b6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/binning.py @@ -0,0 +1,321 @@ +""" +This module contains the BinMapper class. + +BinMapper is used for mapping a real-valued dataset into integer-valued bins. +Bin thresholds are computed with the quantiles so that each bin contains +approximately the same number of samples. +""" +# Author: Nicolas Hug + +import numpy as np + +from ...base import BaseEstimator, TransformerMixin +from ...utils import check_array, check_random_state +from ...utils._openmp_helpers import _openmp_effective_n_threads +from ...utils.fixes import percentile +from ...utils.validation import check_is_fitted +from ._binning import _map_to_bins +from ._bitset import set_bitset_memoryview +from .common import ALMOST_INF, X_BINNED_DTYPE, X_BITSET_INNER_DTYPE, X_DTYPE + + +def _find_binning_thresholds(col_data, max_bins): + """Extract quantiles from a continuous feature. + + Missing values are ignored for finding the thresholds. + + Parameters + ---------- + col_data : array-like, shape (n_samples,) + The continuous feature to bin. + max_bins: int + The maximum number of bins to use for non-missing values. If for a + given feature the number of unique values is less than ``max_bins``, + then those unique values will be used to compute the bin thresholds, + instead of the quantiles + + Return + ------ + binning_thresholds : ndarray of shape(min(max_bins, n_unique_values) - 1,) + The increasing numeric values that can be used to separate the bins. + A given value x will be mapped into bin value i iff + bining_thresholds[i - 1] < x <= binning_thresholds[i] + """ + # ignore missing values when computing bin thresholds + missing_mask = np.isnan(col_data) + if missing_mask.any(): + col_data = col_data[~missing_mask] + col_data = np.ascontiguousarray(col_data, dtype=X_DTYPE) + distinct_values = np.unique(col_data) + if len(distinct_values) <= max_bins: + midpoints = distinct_values[:-1] + distinct_values[1:] + midpoints *= 0.5 + else: + # We sort again the data in this case. 
We could compute + # approximate midpoint percentiles using the output of + # np.unique(col_data, return_counts) instead but this is more + # work and the performance benefit will be limited because we + # work on a fixed-size subsample of the full data. + percentiles = np.linspace(0, 100, num=max_bins + 1) + percentiles = percentiles[1:-1] + midpoints = percentile(col_data, percentiles, method="midpoint").astype(X_DTYPE) + assert midpoints.shape[0] == max_bins - 1 + + # We avoid having +inf thresholds: +inf thresholds are only allowed in + # a "split on nan" situation. + np.clip(midpoints, a_min=None, a_max=ALMOST_INF, out=midpoints) + return midpoints + + +class _BinMapper(TransformerMixin, BaseEstimator): + """Transformer that maps a dataset into integer-valued bins. + + For continuous features, the bins are created in a feature-wise fashion, + using quantiles so that each bins contains approximately the same number + of samples. For large datasets, quantiles are computed on a subset of the + data to speed-up the binning, but the quantiles should remain stable. + + For categorical features, the raw categorical values are expected to be + in [0, 254] (this is not validated here though) and each category + corresponds to a bin. All categorical values must be known at + initialization: transform() doesn't know how to bin unknown categorical + values. Note that transform() is only used on non-training data in the + case of early stopping. + + Features with a small number of values may be binned into less than + ``n_bins`` bins. The last bin (at index ``n_bins - 1``) is always reserved + for missing values. + + Parameters + ---------- + n_bins : int, default=256 + The maximum number of bins to use (including the bin for missing + values). Should be in [3, 256]. Non-missing values are binned on + ``max_bins = n_bins - 1`` bins. The last bin is always reserved for + missing values. If for a given feature the number of unique values is + less than ``max_bins``, then those unique values will be used to + compute the bin thresholds, instead of the quantiles. For categorical + features indicated by ``is_categorical``, the docstring for + ``is_categorical`` details on this procedure. + subsample : int or None, default=2e5 + If ``n_samples > subsample``, then ``sub_samples`` samples will be + randomly chosen to compute the quantiles. If ``None``, the whole data + is used. + is_categorical : ndarray of bool of shape (n_features,), default=None + Indicates categorical features. By default, all features are + considered continuous. + known_categories : list of {ndarray, None} of shape (n_features,), \ + default=none + For each categorical feature, the array indicates the set of unique + categorical values. These should be the possible values over all the + data, not just the training data. For continuous features, the + corresponding entry should be None. + random_state: int, RandomState instance or None, default=None + Pseudo-random number generator to control the random sub-sampling. + Pass an int for reproducible output across multiple + function calls. + See :term:`Glossary `. + n_threads : int, default=None + Number of OpenMP threads to use. `_openmp_effective_n_threads` is called + to determine the effective number of threads use, which takes cgroups CPU + quotes into account. See the docstring of `_openmp_effective_n_threads` + for details. + + Attributes + ---------- + bin_thresholds_ : list of ndarray + For each feature, each array indicates how to map a feature into a + binned feature. 
The semantic and size depends on the nature of the + feature: + - for real-valued features, the array corresponds to the real-valued + bin thresholds (the upper bound of each bin). There are ``max_bins + - 1`` thresholds, where ``max_bins = n_bins - 1`` is the number of + bins used for non-missing values. + - for categorical features, the array is a map from a binned category + value to the raw category value. The size of the array is equal to + ``min(max_bins, category_cardinality)`` where we ignore missing + values in the cardinality. + n_bins_non_missing_ : ndarray, dtype=np.uint32 + For each feature, gives the number of bins actually used for + non-missing values. For features with a lot of unique values, this is + equal to ``n_bins - 1``. + is_categorical_ : ndarray of shape (n_features,), dtype=np.uint8 + Indicator for categorical features. + missing_values_bin_idx_ : np.uint8 + The index of the bin where missing values are mapped. This is a + constant across all features. This corresponds to the last bin, and + it is always equal to ``n_bins - 1``. Note that if ``n_bins_non_missing_`` + is less than ``n_bins - 1`` for a given feature, then there are + empty (and unused) bins. + """ + + def __init__( + self, + n_bins=256, + subsample=int(2e5), + is_categorical=None, + known_categories=None, + random_state=None, + n_threads=None, + ): + self.n_bins = n_bins + self.subsample = subsample + self.is_categorical = is_categorical + self.known_categories = known_categories + self.random_state = random_state + self.n_threads = n_threads + + def fit(self, X, y=None): + """Fit data X by computing the binning thresholds. + + The last bin is reserved for missing values, whether missing values + are present in the data or not. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The data to bin. + y: None + Ignored. + + Returns + ------- + self : object + """ + if not (3 <= self.n_bins <= 256): + # min is 3: at least 2 distinct bins and a missing values bin + raise ValueError( + "n_bins={} should be no smaller than 3 and no larger than 256.".format( + self.n_bins + ) + ) + + X = check_array(X, dtype=[X_DTYPE], force_all_finite=False) + max_bins = self.n_bins - 1 + + rng = check_random_state(self.random_state) + if self.subsample is not None and X.shape[0] > self.subsample: + subset = rng.choice(X.shape[0], self.subsample, replace=False) + X = X.take(subset, axis=0) + + if self.is_categorical is None: + self.is_categorical_ = np.zeros(X.shape[1], dtype=np.uint8) + else: + self.is_categorical_ = np.asarray(self.is_categorical, dtype=np.uint8) + + n_features = X.shape[1] + known_categories = self.known_categories + if known_categories is None: + known_categories = [None] * n_features + + # validate is_categorical and known_categories parameters + for f_idx in range(n_features): + is_categorical = self.is_categorical_[f_idx] + known_cats = known_categories[f_idx] + if is_categorical and known_cats is None: + raise ValueError( + f"Known categories for feature {f_idx} must be provided." + ) + if not is_categorical and known_cats is not None: + raise ValueError( + f"Feature {f_idx} isn't marked as a categorical feature, " + "but categories were passed." 
+ ) + + self.missing_values_bin_idx_ = self.n_bins - 1 + + self.bin_thresholds_ = [] + n_bins_non_missing = [] + + for f_idx in range(n_features): + if not self.is_categorical_[f_idx]: + thresholds = _find_binning_thresholds(X[:, f_idx], max_bins) + n_bins_non_missing.append(thresholds.shape[0] + 1) + else: + # Since categories are assumed to be encoded in + # [0, n_cats] and since n_cats <= max_bins, + # the thresholds *are* the unique categorical values. This will + # lead to the correct mapping in transform() + thresholds = known_categories[f_idx] + n_bins_non_missing.append(thresholds.shape[0]) + + self.bin_thresholds_.append(thresholds) + + self.n_bins_non_missing_ = np.array(n_bins_non_missing, dtype=np.uint32) + return self + + def transform(self, X): + """Bin data X. + + Missing values will be mapped to the last bin. + + For categorical features, the mapping will be incorrect for unknown + categories. Since the BinMapper is given known_categories of the + entire training data (i.e. before the call to train_test_split() in + case of early-stopping), this never happens. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The data to bin. + + Returns + ------- + X_binned : array-like of shape (n_samples, n_features) + The binned data (fortran-aligned). + """ + X = check_array(X, dtype=[X_DTYPE], force_all_finite=False) + check_is_fitted(self) + if X.shape[1] != self.n_bins_non_missing_.shape[0]: + raise ValueError( + "This estimator was fitted with {} features but {} got passed " + "to transform()".format(self.n_bins_non_missing_.shape[0], X.shape[1]) + ) + + n_threads = _openmp_effective_n_threads(self.n_threads) + binned = np.zeros_like(X, dtype=X_BINNED_DTYPE, order="F") + _map_to_bins( + X, + self.bin_thresholds_, + self.is_categorical_, + self.missing_values_bin_idx_, + n_threads, + binned, + ) + return binned + + def make_known_categories_bitsets(self): + """Create bitsets of known categories. + + Returns + ------- + - known_cat_bitsets : ndarray of shape (n_categorical_features, 8) + Array of bitsets of known categories, for each categorical feature. + - f_idx_map : ndarray of shape (n_features,) + Map from original feature index to the corresponding index in the + known_cat_bitsets array. + """ + + categorical_features_indices = np.flatnonzero(self.is_categorical_) + + n_features = self.is_categorical_.size + n_categorical_features = categorical_features_indices.size + + f_idx_map = np.zeros(n_features, dtype=np.uint32) + f_idx_map[categorical_features_indices] = np.arange( + n_categorical_features, dtype=np.uint32 + ) + + known_categories = self.bin_thresholds_ + + known_cat_bitsets = np.zeros( + (n_categorical_features, 8), dtype=X_BITSET_INNER_DTYPE + ) + + # TODO: complexity is O(n_categorical_features * 255). 
Maybe this is + # worth cythonizing + for mapped_f_idx, f_idx in enumerate(categorical_features_indices): + for raw_cat_val in known_categories[f_idx]: + set_bitset_memoryview(known_cat_bitsets[mapped_f_idx], raw_cat_val) + + return known_cat_bitsets, f_idx_map diff --git a/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/common.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/common.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..98e8a86cd453474a9a26f9306777e66017ae75f7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/common.cpython-310-x86_64-linux-gnu.so differ diff --git a/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/common.pxd b/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/common.pxd new file mode 100644 index 0000000000000000000000000000000000000000..3e71f2dc56060941d884580590b4ea93cd00fa94 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/common.pxd @@ -0,0 +1,44 @@ +cimport numpy as cnp +from sklearn.utils._typedefs cimport intp_t + +cnp.import_array() + + +ctypedef cnp.npy_float64 X_DTYPE_C +ctypedef cnp.npy_uint8 X_BINNED_DTYPE_C +ctypedef cnp.npy_float64 Y_DTYPE_C +ctypedef cnp.npy_float32 G_H_DTYPE_C +ctypedef cnp.npy_uint32 BITSET_INNER_DTYPE_C +ctypedef BITSET_INNER_DTYPE_C[8] BITSET_DTYPE_C + +cdef packed struct hist_struct: + # Same as histogram dtype but we need a struct to declare views. It needs + # to be packed since by default numpy dtypes aren't aligned + Y_DTYPE_C sum_gradients + Y_DTYPE_C sum_hessians + unsigned int count + + +cdef packed struct node_struct: + # Equivalent struct to PREDICTOR_RECORD_DTYPE to use in memory views. It + # needs to be packed since by default numpy dtypes aren't aligned + Y_DTYPE_C value + unsigned int count + intp_t feature_idx + X_DTYPE_C num_threshold + unsigned char missing_go_to_left + unsigned int left + unsigned int right + Y_DTYPE_C gain + unsigned int depth + unsigned char is_leaf + X_BINNED_DTYPE_C bin_threshold + unsigned char is_categorical + # The index of the corresponding bitsets in the Predictor's bitset arrays. 
+ # Only used if is_categorical is True + unsigned int bitset_idx + +cpdef enum MonotonicConstraint: + NO_CST = 0 + POS = 1 + NEG = -1 diff --git a/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/gradient_boosting.py b/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/gradient_boosting.py new file mode 100644 index 0000000000000000000000000000000000000000..0e9e09b8008b5dcfef94358d586a76dcf2f2244c --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/gradient_boosting.py @@ -0,0 +1,2270 @@ +"""Fast Gradient Boosting decision trees for classification and regression.""" + +# Author: Nicolas Hug + +import itertools +import warnings +from abc import ABC, abstractmethod +from contextlib import contextmanager, nullcontext, suppress +from functools import partial +from numbers import Integral, Real +from time import time + +import numpy as np + +from ..._loss.loss import ( + _LOSSES, + BaseLoss, + HalfBinomialLoss, + HalfGammaLoss, + HalfMultinomialLoss, + HalfPoissonLoss, + PinballLoss, +) +from ...base import ( + BaseEstimator, + ClassifierMixin, + RegressorMixin, + _fit_context, + is_classifier, +) +from ...compose import ColumnTransformer +from ...metrics import check_scoring +from ...metrics._scorer import _SCORERS +from ...model_selection import train_test_split +from ...preprocessing import FunctionTransformer, LabelEncoder, OrdinalEncoder +from ...utils import check_random_state, compute_sample_weight, is_scalar_nan, resample +from ...utils._openmp_helpers import _openmp_effective_n_threads +from ...utils._param_validation import Hidden, Interval, RealNotInt, StrOptions +from ...utils.multiclass import check_classification_targets +from ...utils.validation import ( + _check_monotonic_cst, + _check_sample_weight, + _check_y, + _is_pandas_df, + check_array, + check_consistent_length, + check_is_fitted, +) +from ._gradient_boosting import _update_raw_predictions +from .binning import _BinMapper +from .common import G_H_DTYPE, X_DTYPE, Y_DTYPE +from .grower import TreeGrower + +_LOSSES = _LOSSES.copy() +_LOSSES.update( + { + "poisson": HalfPoissonLoss, + "gamma": HalfGammaLoss, + "quantile": PinballLoss, + } +) + + +def _update_leaves_values(loss, grower, y_true, raw_prediction, sample_weight): + """Update the leaf values to be predicted by the tree. + + Update equals: + loss.fit_intercept_only(y_true - raw_prediction) + + This is only applied if loss.differentiable is False. + Note: It only works, if the loss is a function of the residual, as is the + case for AbsoluteError and PinballLoss. Otherwise, one would need to get + the minimum of loss(y_true, raw_prediction + x) in x. A few examples: + - AbsoluteError: median(y_true - raw_prediction). + - PinballLoss: quantile(y_true - raw_prediction). + + More background: + For the standard gradient descent method according to "Greedy Function + Approximation: A Gradient Boosting Machine" by Friedman, all loss functions but the + squared loss need a line search step. BaseHistGradientBoosting, however, implements + a so called Newton boosting where the trees are fitted to a 2nd order + approximations of the loss in terms of gradients and hessians. In this case, the + line search step is only necessary if the loss is not smooth, i.e. not + differentiable, which renders the 2nd order approximation invalid. In fact, + non-smooth losses arbitrarily set hessians to 1 and effectively use the standard + gradient descent method with line search. 
+ """ + # TODO: Ideally this should be computed in parallel over the leaves using something + # similar to _update_raw_predictions(), but this requires a cython version of + # median(). + for leaf in grower.finalized_leaves: + indices = leaf.sample_indices + if sample_weight is None: + sw = None + else: + sw = sample_weight[indices] + update = loss.fit_intercept_only( + y_true=y_true[indices] - raw_prediction[indices], + sample_weight=sw, + ) + leaf.value = grower.shrinkage * update + # Note that the regularization is ignored here + + +@contextmanager +def _patch_raw_predict(estimator, raw_predictions): + """Context manager that patches _raw_predict to return raw_predictions. + + `raw_predictions` is typically a precomputed array to avoid redundant + state-wise computations fitting with early stopping enabled: in this case + `raw_predictions` is incrementally updated whenever we add a tree to the + boosted ensemble. + + Note: this makes fitting HistGradientBoosting* models inherently non thread + safe at fit time. However thread-safety at fit time was never guaranteed nor + enforced for scikit-learn estimators in general. + + Thread-safety at prediction/transform time is another matter as those + operations are typically side-effect free and therefore often thread-safe by + default for most scikit-learn models and would like to keep it that way. + Therefore this context manager should only be used at fit time. + + TODO: in the future, we could explore the possibility to extend the scorer + public API to expose a way to compute vales from raw predictions. That would + probably require also making the scorer aware of the inverse link function + used by the estimator which is typically private API for now, hence the need + for this patching mechanism. 
+ """ + orig_raw_predict = estimator._raw_predict + + def _patched_raw_predicts(*args, **kwargs): + return raw_predictions + + estimator._raw_predict = _patched_raw_predicts + yield estimator + estimator._raw_predict = orig_raw_predict + + +class BaseHistGradientBoosting(BaseEstimator, ABC): + """Base class for histogram-based gradient boosting estimators.""" + + _parameter_constraints: dict = { + "loss": [BaseLoss], + "learning_rate": [Interval(Real, 0, None, closed="neither")], + "max_iter": [Interval(Integral, 1, None, closed="left")], + "max_leaf_nodes": [Interval(Integral, 2, None, closed="left"), None], + "max_depth": [Interval(Integral, 1, None, closed="left"), None], + "min_samples_leaf": [Interval(Integral, 1, None, closed="left")], + "l2_regularization": [Interval(Real, 0, None, closed="left")], + "max_features": [Interval(RealNotInt, 0, 1, closed="right")], + "monotonic_cst": ["array-like", dict, None], + "interaction_cst": [ + list, + tuple, + StrOptions({"pairwise", "no_interactions"}), + None, + ], + "n_iter_no_change": [Interval(Integral, 1, None, closed="left")], + "validation_fraction": [ + Interval(RealNotInt, 0, 1, closed="neither"), + Interval(Integral, 1, None, closed="left"), + None, + ], + "tol": [Interval(Real, 0, None, closed="left")], + "max_bins": [Interval(Integral, 2, 255, closed="both")], + "categorical_features": [ + "array-like", + StrOptions({"from_dtype"}), + Hidden(StrOptions({"warn"})), + None, + ], + "warm_start": ["boolean"], + "early_stopping": [StrOptions({"auto"}), "boolean"], + "scoring": [str, callable, None], + "verbose": ["verbose"], + "random_state": ["random_state"], + } + + @abstractmethod + def __init__( + self, + loss, + *, + learning_rate, + max_iter, + max_leaf_nodes, + max_depth, + min_samples_leaf, + l2_regularization, + max_features, + max_bins, + categorical_features, + monotonic_cst, + interaction_cst, + warm_start, + early_stopping, + scoring, + validation_fraction, + n_iter_no_change, + tol, + verbose, + random_state, + ): + self.loss = loss + self.learning_rate = learning_rate + self.max_iter = max_iter + self.max_leaf_nodes = max_leaf_nodes + self.max_depth = max_depth + self.min_samples_leaf = min_samples_leaf + self.l2_regularization = l2_regularization + self.max_features = max_features + self.max_bins = max_bins + self.monotonic_cst = monotonic_cst + self.interaction_cst = interaction_cst + self.categorical_features = categorical_features + self.warm_start = warm_start + self.early_stopping = early_stopping + self.scoring = scoring + self.validation_fraction = validation_fraction + self.n_iter_no_change = n_iter_no_change + self.tol = tol + self.verbose = verbose + self.random_state = random_state + + def _validate_parameters(self): + """Validate parameters passed to __init__. + + The parameters that are directly passed to the grower are checked in + TreeGrower.""" + if self.monotonic_cst is not None and self.n_trees_per_iteration_ != 1: + raise ValueError( + "monotonic constraints are not supported for multiclass classification." + ) + + def _finalize_sample_weight(self, sample_weight, y): + """Finalize sample weight. + + Used by subclasses to adjust sample_weights. This is useful for implementing + class weights. + """ + return sample_weight + + def _preprocess_X(self, X, *, reset): + """Preprocess and validate X. + + Parameters + ---------- + X : {array-like, pandas DataFrame} of shape (n_samples, n_features) + Input data. + + reset : bool + Whether to reset the `n_features_in_` and `feature_names_in_ attributes. 
+ + Returns + ------- + X : ndarray of shape (n_samples, n_features) + Validated input data. + + known_categories : list of ndarray of shape (n_categories,) + List of known categories for each categorical feature. + """ + # If there is a preprocessor, we let the preprocessor handle the validation. + # Otherwise, we validate the data ourselves. + check_X_kwargs = dict(dtype=[X_DTYPE], force_all_finite=False) + if not reset: + if self._preprocessor is None: + return self._validate_data(X, reset=False, **check_X_kwargs) + return self._preprocessor.transform(X) + + # At this point, reset is False, which runs during `fit`. + self.is_categorical_ = self._check_categorical_features(X) + + if self.is_categorical_ is None: + self._preprocessor = None + self._is_categorical_remapped = None + + X = self._validate_data(X, **check_X_kwargs) + return X, None + + n_features = X.shape[1] + ordinal_encoder = OrdinalEncoder( + categories="auto", + handle_unknown="use_encoded_value", + unknown_value=np.nan, + encoded_missing_value=np.nan, + dtype=X_DTYPE, + ) + + check_X = partial(check_array, **check_X_kwargs) + numerical_preprocessor = FunctionTransformer(check_X) + self._preprocessor = ColumnTransformer( + [ + ("encoder", ordinal_encoder, self.is_categorical_), + ("numerical", numerical_preprocessor, ~self.is_categorical_), + ] + ) + self._preprocessor.set_output(transform="default") + X = self._preprocessor.fit_transform(X) + # check categories found by the OrdinalEncoder and get their encoded values + known_categories = self._check_categories() + self.n_features_in_ = self._preprocessor.n_features_in_ + with suppress(AttributeError): + self.feature_names_in_ = self._preprocessor.feature_names_in_ + + # The ColumnTransformer's output places the categorical features at the + # beginning + categorical_remapped = np.zeros(n_features, dtype=bool) + categorical_remapped[self._preprocessor.output_indices_["encoder"]] = True + self._is_categorical_remapped = categorical_remapped + + return X, known_categories + + def _check_categories(self): + """Check categories found by the preprocessor and return their encoded values. + + Returns a list of length ``self.n_features_in_``, with one entry per + input feature. + + For non-categorical features, the corresponding entry is ``None``. + + For categorical features, the corresponding entry is an array + containing the categories as encoded by the preprocessor (an + ``OrdinalEncoder``), excluding missing values. The entry is therefore + ``np.arange(n_categories)`` where ``n_categories`` is the number of + unique values in the considered feature column, after removing missing + values. + + If ``n_categories > self.max_bins`` for any feature, a ``ValueError`` + is raised. + """ + encoder = self._preprocessor.named_transformers_["encoder"] + known_categories = [None] * self._preprocessor.n_features_in_ + categorical_column_indices = np.arange(self._preprocessor.n_features_in_)[ + self._preprocessor.output_indices_["encoder"] + ] + for feature_idx, categories in zip( + categorical_column_indices, encoder.categories_ + ): + # OrdinalEncoder always puts np.nan as the last category if the + # training data has missing values. Here we remove it because it is + # already added by the _BinMapper. 
+ if len(categories) and is_scalar_nan(categories[-1]): + categories = categories[:-1] + if categories.size > self.max_bins: + try: + feature_name = repr(encoder.feature_names_in_[feature_idx]) + except AttributeError: + feature_name = f"at index {feature_idx}" + raise ValueError( + f"Categorical feature {feature_name} is expected to " + f"have a cardinality <= {self.max_bins} but actually " + f"has a cardinality of {categories.size}." + ) + known_categories[feature_idx] = np.arange(len(categories), dtype=X_DTYPE) + return known_categories + + def _check_categorical_features(self, X): + """Check and validate categorical features in X + + Parameters + ---------- + X : {array-like, pandas DataFrame} of shape (n_samples, n_features) + Input data. + + Return + ------ + is_categorical : ndarray of shape (n_features,) or None, dtype=bool + Indicates whether a feature is categorical. If no feature is + categorical, this is None. + """ + # Special code for pandas because of a bug in recent pandas, which is + # fixed in main and maybe included in 2.2.1, see + # https://github.com/pandas-dev/pandas/pull/57173. + # Also pandas versions < 1.5.1 do not support the dataframe interchange + if _is_pandas_df(X): + X_is_dataframe = True + categorical_columns_mask = np.asarray(X.dtypes == "category") + X_has_categorical_columns = categorical_columns_mask.any() + elif hasattr(X, "__dataframe__"): + X_is_dataframe = True + categorical_columns_mask = np.asarray( + [ + c.dtype[0].name == "CATEGORICAL" + for c in X.__dataframe__().get_columns() + ] + ) + X_has_categorical_columns = categorical_columns_mask.any() + else: + X_is_dataframe = False + categorical_columns_mask = None + X_has_categorical_columns = False + + # TODO(1.6): Remove warning and change default to "from_dtype" in v1.6 + if ( + isinstance(self.categorical_features, str) + and self.categorical_features == "warn" + ): + if X_has_categorical_columns: + warnings.warn( + ( + "The categorical_features parameter will change to 'from_dtype'" + " in v1.6. The 'from_dtype' option automatically treats" + " categorical dtypes in a DataFrame as categorical features." + ), + FutureWarning, + ) + categorical_features = None + else: + categorical_features = self.categorical_features + + categorical_by_dtype = ( + isinstance(categorical_features, str) + and categorical_features == "from_dtype" + ) + no_categorical_dtype = categorical_features is None or ( + categorical_by_dtype and not X_is_dataframe + ) + + if no_categorical_dtype: + return None + + use_pandas_categorical = categorical_by_dtype and X_is_dataframe + if use_pandas_categorical: + categorical_features = categorical_columns_mask + else: + categorical_features = np.asarray(categorical_features) + + if categorical_features.size == 0: + return None + + if categorical_features.dtype.kind not in ("i", "b", "U", "O"): + raise ValueError( + "categorical_features must be an array-like of bool, int or " + f"str, got: {categorical_features.dtype.name}." + ) + + if categorical_features.dtype.kind == "O": + types = set(type(f) for f in categorical_features) + if types != {str}: + raise ValueError( + "categorical_features must be an array-like of bool, int or " + f"str, got: {', '.join(sorted(t.__name__ for t in types))}." + ) + + n_features = X.shape[1] + # At this point `_validate_data` was not called yet because we want to use the + # dtypes are used to discover the categorical features. Thus `feature_names_in_` + # is not defined yet. 
+ feature_names_in_ = getattr(X, "columns", None) + + if categorical_features.dtype.kind in ("U", "O"): + # check for feature names + if feature_names_in_ is None: + raise ValueError( + "categorical_features should be passed as an array of " + "integers or as a boolean mask when the model is fitted " + "on data without feature names." + ) + is_categorical = np.zeros(n_features, dtype=bool) + feature_names = list(feature_names_in_) + for feature_name in categorical_features: + try: + is_categorical[feature_names.index(feature_name)] = True + except ValueError as e: + raise ValueError( + f"categorical_features has a item value '{feature_name}' " + "which is not a valid feature name of the training " + f"data. Observed feature names: {feature_names}" + ) from e + elif categorical_features.dtype.kind == "i": + # check for categorical features as indices + if ( + np.max(categorical_features) >= n_features + or np.min(categorical_features) < 0 + ): + raise ValueError( + "categorical_features set as integer " + "indices must be in [0, n_features - 1]" + ) + is_categorical = np.zeros(n_features, dtype=bool) + is_categorical[categorical_features] = True + else: + if categorical_features.shape[0] != n_features: + raise ValueError( + "categorical_features set as a boolean mask " + "must have shape (n_features,), got: " + f"{categorical_features.shape}" + ) + is_categorical = categorical_features + + if not np.any(is_categorical): + return None + return is_categorical + + def _check_interaction_cst(self, n_features): + """Check and validation for interaction constraints.""" + if self.interaction_cst is None: + return None + + if self.interaction_cst == "no_interactions": + interaction_cst = [[i] for i in range(n_features)] + elif self.interaction_cst == "pairwise": + interaction_cst = itertools.combinations(range(n_features), 2) + else: + interaction_cst = self.interaction_cst + + try: + constraints = [set(group) for group in interaction_cst] + except TypeError: + raise ValueError( + "Interaction constraints must be a sequence of tuples or lists, got:" + f" {self.interaction_cst!r}." + ) + + for group in constraints: + for x in group: + if not (isinstance(x, Integral) and 0 <= x < n_features): + raise ValueError( + "Interaction constraints must consist of integer indices in" + f" [0, n_features - 1] = [0, {n_features - 1}], specifying the" + " position of features, got invalid indices:" + f" {group!r}" + ) + + # Add all not listed features as own group by default. + rest = set(range(n_features)) - set().union(*constraints) + if len(rest) > 0: + constraints.append(rest) + + return constraints + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y, sample_weight=None): + """Fit the gradient boosting model. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The input samples. + + y : array-like of shape (n_samples,) + Target values. + + sample_weight : array-like of shape (n_samples,) default=None + Weights of training data. + + .. versionadded:: 0.23 + + Returns + ------- + self : object + Fitted estimator. 
+ """ + fit_start_time = time() + acc_find_split_time = 0.0 # time spent finding the best splits + acc_apply_split_time = 0.0 # time spent splitting nodes + acc_compute_hist_time = 0.0 # time spent computing histograms + # time spent predicting X for gradient and hessians update + acc_prediction_time = 0.0 + X, known_categories = self._preprocess_X(X, reset=True) + y = _check_y(y, estimator=self) + y = self._encode_y(y) + check_consistent_length(X, y) + # Do not create unit sample weights by default to later skip some + # computation + if sample_weight is not None: + sample_weight = _check_sample_weight(sample_weight, X, dtype=np.float64) + # TODO: remove when PDP supports sample weights + self._fitted_with_sw = True + + sample_weight = self._finalize_sample_weight(sample_weight, y) + + rng = check_random_state(self.random_state) + + # When warm starting, we want to reuse the same seed that was used + # the first time fit was called (e.g. train/val split). + # For feature subsampling, we want to continue with the rng we started with. + if not self.warm_start or not self._is_fitted(): + self._random_seed = rng.randint(np.iinfo(np.uint32).max, dtype="u8") + feature_subsample_seed = rng.randint(np.iinfo(np.uint32).max, dtype="u8") + self._feature_subsample_rng = np.random.default_rng(feature_subsample_seed) + + self._validate_parameters() + monotonic_cst = _check_monotonic_cst(self, self.monotonic_cst) + + # used for validation in predict + n_samples, self._n_features = X.shape + + # Encode constraints into a list of sets of features indices (integers). + interaction_cst = self._check_interaction_cst(self._n_features) + + # we need this stateful variable to tell raw_predict() that it was + # called from fit() (this current method), and that the data it has + # received is pre-binned. + # predicting is faster on pre-binned data, so we want early stopping + # predictions to be made on pre-binned data. Unfortunately the _scorer + # can only call predict() or predict_proba(), not raw_predict(), and + # there's no way to tell the scorer that it needs to predict binned + # data. + self._in_fit = True + + # `_openmp_effective_n_threads` is used to take cgroups CPU quotes + # into account when determine the maximum number of threads to use. + n_threads = _openmp_effective_n_threads() + + if isinstance(self.loss, str): + self._loss = self._get_loss(sample_weight=sample_weight) + elif isinstance(self.loss, BaseLoss): + self._loss = self.loss + + if self.early_stopping == "auto": + self.do_early_stopping_ = n_samples > 10000 + else: + self.do_early_stopping_ = self.early_stopping + + # create validation data if needed + self._use_validation_data = self.validation_fraction is not None + if self.do_early_stopping_ and self._use_validation_data: + # stratify for classification + # instead of checking predict_proba, loss.n_classes >= 2 would also work + stratify = y if hasattr(self._loss, "predict_proba") else None + + # Save the state of the RNG for the training and validation split. + # This is needed in order to have the same split when using + # warm starting. 
+ + if sample_weight is None: + X_train, X_val, y_train, y_val = train_test_split( + X, + y, + test_size=self.validation_fraction, + stratify=stratify, + random_state=self._random_seed, + ) + sample_weight_train = sample_weight_val = None + else: + # TODO: incorporate sample_weight in sampling here, as well as + # stratify + ( + X_train, + X_val, + y_train, + y_val, + sample_weight_train, + sample_weight_val, + ) = train_test_split( + X, + y, + sample_weight, + test_size=self.validation_fraction, + stratify=stratify, + random_state=self._random_seed, + ) + else: + X_train, y_train, sample_weight_train = X, y, sample_weight + X_val = y_val = sample_weight_val = None + + # Bin the data + # For ease of use of the API, the user-facing GBDT classes accept the + # parameter max_bins, which doesn't take into account the bin for + # missing values (which is always allocated). However, since max_bins + # isn't the true maximal number of bins, all other private classes + # (binmapper, histbuilder...) accept n_bins instead, which is the + # actual total number of bins. Everywhere in the code, the + # convention is that n_bins == max_bins + 1 + n_bins = self.max_bins + 1 # + 1 for missing values + self._bin_mapper = _BinMapper( + n_bins=n_bins, + is_categorical=self._is_categorical_remapped, + known_categories=known_categories, + random_state=self._random_seed, + n_threads=n_threads, + ) + X_binned_train = self._bin_data(X_train, is_training_data=True) + if X_val is not None: + X_binned_val = self._bin_data(X_val, is_training_data=False) + else: + X_binned_val = None + + # Uses binned data to check for missing values + has_missing_values = ( + (X_binned_train == self._bin_mapper.missing_values_bin_idx_) + .any(axis=0) + .astype(np.uint8) + ) + + if self.verbose: + print("Fitting gradient boosted rounds:") + + n_samples = X_binned_train.shape[0] + scoring_is_predefined_string = self.scoring in _SCORERS + need_raw_predictions_val = X_binned_val is not None and ( + scoring_is_predefined_string or self.scoring == "loss" + ) + # First time calling fit, or no warm start + if not (self._is_fitted() and self.warm_start): + # Clear random state and score attributes + self._clear_state() + + # initialize raw_predictions: those are the accumulated values + # predicted by the trees for the training data. raw_predictions has + # shape (n_samples, n_trees_per_iteration) where + # n_trees_per_iterations is n_classes in multiclass classification, + # else 1. + # self._baseline_prediction has shape (1, n_trees_per_iteration) + self._baseline_prediction = self._loss.fit_intercept_only( + y_true=y_train, sample_weight=sample_weight_train + ).reshape((1, -1)) + raw_predictions = np.zeros( + shape=(n_samples, self.n_trees_per_iteration_), + dtype=self._baseline_prediction.dtype, + order="F", + ) + raw_predictions += self._baseline_prediction + + # predictors is a matrix (list of lists) of TreePredictor objects + # with shape (n_iter_, n_trees_per_iteration) + self._predictors = predictors = [] + + # Initialize structures and attributes related to early stopping + self._scorer = None # set if scoring != loss + raw_predictions_val = None # set if use val and scoring is a string + self.train_score_ = [] + self.validation_score_ = [] + + if self.do_early_stopping_: + # populate train_score and validation_score with the + # predictions of the initial model (before the first tree) + + # Create raw_predictions_val for storing the raw predictions of + # the validation data. 
+ if need_raw_predictions_val: + raw_predictions_val = np.zeros( + shape=(X_binned_val.shape[0], self.n_trees_per_iteration_), + dtype=self._baseline_prediction.dtype, + order="F", + ) + + raw_predictions_val += self._baseline_prediction + + if self.scoring == "loss": + # we're going to compute scoring w.r.t the loss. As losses + # take raw predictions as input (unlike the scorers), we + # can optimize a bit and avoid repeating computing the + # predictions of the previous trees. We'll reuse + # raw_predictions (as it's needed for training anyway) for + # evaluating the training loss. + + self._check_early_stopping_loss( + raw_predictions=raw_predictions, + y_train=y_train, + sample_weight_train=sample_weight_train, + raw_predictions_val=raw_predictions_val, + y_val=y_val, + sample_weight_val=sample_weight_val, + n_threads=n_threads, + ) + else: + self._scorer = check_scoring(self, self.scoring) + # _scorer is a callable with signature (est, X, y) and + # calls est.predict() or est.predict_proba() depending on + # its nature. + # Unfortunately, each call to _scorer() will compute + # the predictions of all the trees. So we use a subset of + # the training set to compute train scores. + + # Compute the subsample set + ( + X_binned_small_train, + y_small_train, + sample_weight_small_train, + indices_small_train, + ) = self._get_small_trainset( + X_binned_train, + y_train, + sample_weight_train, + self._random_seed, + ) + + # If the scorer is a predefined string, then we optimize + # the evaluation by re-using the incrementally updated raw + # predictions. + if scoring_is_predefined_string: + raw_predictions_small_train = raw_predictions[ + indices_small_train + ] + else: + raw_predictions_small_train = None + + self._check_early_stopping_scorer( + X_binned_small_train, + y_small_train, + sample_weight_small_train, + X_binned_val, + y_val, + sample_weight_val, + raw_predictions_small_train=raw_predictions_small_train, + raw_predictions_val=raw_predictions_val, + ) + begin_at_stage = 0 + + # warm start: this is not the first time fit was called + else: + # Check that the maximum number of iterations is not smaller + # than the number of iterations from the previous fit + if self.max_iter < self.n_iter_: + raise ValueError( + "max_iter=%d must be larger than or equal to " + "n_iter_=%d when warm_start==True" % (self.max_iter, self.n_iter_) + ) + + # Convert array attributes to lists + self.train_score_ = self.train_score_.tolist() + self.validation_score_ = self.validation_score_.tolist() + + # Compute raw predictions + raw_predictions = self._raw_predict(X_binned_train, n_threads=n_threads) + if self.do_early_stopping_ and need_raw_predictions_val: + raw_predictions_val = self._raw_predict( + X_binned_val, n_threads=n_threads + ) + else: + raw_predictions_val = None + + if self.do_early_stopping_ and self.scoring != "loss": + # Compute the subsample set + ( + X_binned_small_train, + y_small_train, + sample_weight_small_train, + indices_small_train, + ) = self._get_small_trainset( + X_binned_train, y_train, sample_weight_train, self._random_seed + ) + + # Get the predictors from the previous fit + predictors = self._predictors + + begin_at_stage = self.n_iter_ + + # initialize gradients and hessians (empty arrays). + # shape = (n_samples, n_trees_per_iteration). 
+ gradient, hessian = self._loss.init_gradient_and_hessian( + n_samples=n_samples, dtype=G_H_DTYPE, order="F" + ) + + for iteration in range(begin_at_stage, self.max_iter): + if self.verbose: + iteration_start_time = time() + print( + "[{}/{}] ".format(iteration + 1, self.max_iter), end="", flush=True + ) + + # Update gradients and hessians, inplace + # Note that self._loss expects shape (n_samples,) for + # n_trees_per_iteration = 1 else shape (n_samples, n_trees_per_iteration). + if self._loss.constant_hessian: + self._loss.gradient( + y_true=y_train, + raw_prediction=raw_predictions, + sample_weight=sample_weight_train, + gradient_out=gradient, + n_threads=n_threads, + ) + else: + self._loss.gradient_hessian( + y_true=y_train, + raw_prediction=raw_predictions, + sample_weight=sample_weight_train, + gradient_out=gradient, + hessian_out=hessian, + n_threads=n_threads, + ) + + # Append a list since there may be more than 1 predictor per iter + predictors.append([]) + + # 2-d views of shape (n_samples, n_trees_per_iteration_) or (n_samples, 1) + # on gradient and hessian to simplify the loop over n_trees_per_iteration_. + if gradient.ndim == 1: + g_view = gradient.reshape((-1, 1)) + h_view = hessian.reshape((-1, 1)) + else: + g_view = gradient + h_view = hessian + + # Build `n_trees_per_iteration` trees. + for k in range(self.n_trees_per_iteration_): + grower = TreeGrower( + X_binned=X_binned_train, + gradients=g_view[:, k], + hessians=h_view[:, k], + n_bins=n_bins, + n_bins_non_missing=self._bin_mapper.n_bins_non_missing_, + has_missing_values=has_missing_values, + is_categorical=self._is_categorical_remapped, + monotonic_cst=monotonic_cst, + interaction_cst=interaction_cst, + max_leaf_nodes=self.max_leaf_nodes, + max_depth=self.max_depth, + min_samples_leaf=self.min_samples_leaf, + l2_regularization=self.l2_regularization, + feature_fraction_per_split=self.max_features, + rng=self._feature_subsample_rng, + shrinkage=self.learning_rate, + n_threads=n_threads, + ) + grower.grow() + + acc_apply_split_time += grower.total_apply_split_time + acc_find_split_time += grower.total_find_split_time + acc_compute_hist_time += grower.total_compute_hist_time + + if not self._loss.differentiable: + _update_leaves_values( + loss=self._loss, + grower=grower, + y_true=y_train, + raw_prediction=raw_predictions[:, k], + sample_weight=sample_weight_train, + ) + + predictor = grower.make_predictor( + binning_thresholds=self._bin_mapper.bin_thresholds_ + ) + predictors[-1].append(predictor) + + # Update raw_predictions with the predictions of the newly + # created tree. + tic_pred = time() + _update_raw_predictions(raw_predictions[:, k], grower, n_threads) + toc_pred = time() + acc_prediction_time += toc_pred - tic_pred + + should_early_stop = False + if self.do_early_stopping_: + # Update raw_predictions_val with the newest tree(s) + if need_raw_predictions_val: + for k, pred in enumerate(self._predictors[-1]): + raw_predictions_val[:, k] += pred.predict_binned( + X_binned_val, + self._bin_mapper.missing_values_bin_idx_, + n_threads, + ) + + if self.scoring == "loss": + should_early_stop = self._check_early_stopping_loss( + raw_predictions=raw_predictions, + y_train=y_train, + sample_weight_train=sample_weight_train, + raw_predictions_val=raw_predictions_val, + y_val=y_val, + sample_weight_val=sample_weight_val, + n_threads=n_threads, + ) + + else: + # If the scorer is a predefined string, then we optimize the + # evaluation by re-using the incrementally computed raw predictions. 
+ if scoring_is_predefined_string: + raw_predictions_small_train = raw_predictions[ + indices_small_train + ] + else: + raw_predictions_small_train = None + + should_early_stop = self._check_early_stopping_scorer( + X_binned_small_train, + y_small_train, + sample_weight_small_train, + X_binned_val, + y_val, + sample_weight_val, + raw_predictions_small_train=raw_predictions_small_train, + raw_predictions_val=raw_predictions_val, + ) + + if self.verbose: + self._print_iteration_stats(iteration_start_time) + + # maybe we could also early stop if all the trees are stumps? + if should_early_stop: + break + + if self.verbose: + duration = time() - fit_start_time + n_total_leaves = sum( + predictor.get_n_leaf_nodes() + for predictors_at_ith_iteration in self._predictors + for predictor in predictors_at_ith_iteration + ) + n_predictors = sum( + len(predictors_at_ith_iteration) + for predictors_at_ith_iteration in self._predictors + ) + print( + "Fit {} trees in {:.3f} s, ({} total leaves)".format( + n_predictors, duration, n_total_leaves + ) + ) + print( + "{:<32} {:.3f}s".format( + "Time spent computing histograms:", acc_compute_hist_time + ) + ) + print( + "{:<32} {:.3f}s".format( + "Time spent finding best splits:", acc_find_split_time + ) + ) + print( + "{:<32} {:.3f}s".format( + "Time spent applying splits:", acc_apply_split_time + ) + ) + print( + "{:<32} {:.3f}s".format("Time spent predicting:", acc_prediction_time) + ) + + self.train_score_ = np.asarray(self.train_score_) + self.validation_score_ = np.asarray(self.validation_score_) + del self._in_fit # hard delete so we're sure it can't be used anymore + return self + + def _is_fitted(self): + return len(getattr(self, "_predictors", [])) > 0 + + def _clear_state(self): + """Clear the state of the gradient boosting model.""" + for var in ("train_score_", "validation_score_"): + if hasattr(self, var): + delattr(self, var) + + def _get_small_trainset(self, X_binned_train, y_train, sample_weight_train, seed): + """Compute the indices of the subsample set and return this set. + + For efficiency, we need to subsample the training set to compute scores + with scorers. + """ + # TODO: incorporate sample_weights here in `resample` + subsample_size = 10000 + if X_binned_train.shape[0] > subsample_size: + indices = np.arange(X_binned_train.shape[0]) + stratify = y_train if is_classifier(self) else None + indices = resample( + indices, + n_samples=subsample_size, + replace=False, + random_state=seed, + stratify=stratify, + ) + X_binned_small_train = X_binned_train[indices] + y_small_train = y_train[indices] + if sample_weight_train is not None: + sample_weight_small_train = sample_weight_train[indices] + else: + sample_weight_small_train = None + X_binned_small_train = np.ascontiguousarray(X_binned_small_train) + return ( + X_binned_small_train, + y_small_train, + sample_weight_small_train, + indices, + ) + else: + return X_binned_train, y_train, sample_weight_train, slice(None) + + def _check_early_stopping_scorer( + self, + X_binned_small_train, + y_small_train, + sample_weight_small_train, + X_binned_val, + y_val, + sample_weight_val, + raw_predictions_small_train=None, + raw_predictions_val=None, + ): + """Check if fitting should be early-stopped based on scorer. + + Scores are computed on validation data or on training data. 
+ """ + if is_classifier(self): + y_small_train = self.classes_[y_small_train.astype(int)] + + self.train_score_.append( + self._score_with_raw_predictions( + X_binned_small_train, + y_small_train, + sample_weight_small_train, + raw_predictions_small_train, + ) + ) + + if self._use_validation_data: + if is_classifier(self): + y_val = self.classes_[y_val.astype(int)] + self.validation_score_.append( + self._score_with_raw_predictions( + X_binned_val, y_val, sample_weight_val, raw_predictions_val + ) + ) + return self._should_stop(self.validation_score_) + else: + return self._should_stop(self.train_score_) + + def _score_with_raw_predictions(self, X, y, sample_weight, raw_predictions=None): + if raw_predictions is None: + patcher_raw_predict = nullcontext() + else: + patcher_raw_predict = _patch_raw_predict(self, raw_predictions) + + with patcher_raw_predict: + if sample_weight is None: + return self._scorer(self, X, y) + else: + return self._scorer(self, X, y, sample_weight=sample_weight) + + def _check_early_stopping_loss( + self, + raw_predictions, + y_train, + sample_weight_train, + raw_predictions_val, + y_val, + sample_weight_val, + n_threads=1, + ): + """Check if fitting should be early-stopped based on loss. + + Scores are computed on validation data or on training data. + """ + self.train_score_.append( + -self._loss( + y_true=y_train, + raw_prediction=raw_predictions, + sample_weight=sample_weight_train, + n_threads=n_threads, + ) + ) + + if self._use_validation_data: + self.validation_score_.append( + -self._loss( + y_true=y_val, + raw_prediction=raw_predictions_val, + sample_weight=sample_weight_val, + n_threads=n_threads, + ) + ) + return self._should_stop(self.validation_score_) + else: + return self._should_stop(self.train_score_) + + def _should_stop(self, scores): + """ + Return True (do early stopping) if the last n scores aren't better + than the (n-1)th-to-last score, up to some tolerance. + """ + reference_position = self.n_iter_no_change + 1 + if len(scores) < reference_position: + return False + + # A higher score is always better. Higher tol means that it will be + # harder for subsequent iteration to be considered an improvement upon + # the reference score, and therefore it is more likely to early stop + # because of the lack of significant improvement. + reference_score = scores[-reference_position] + self.tol + recent_scores = scores[-reference_position + 1 :] + recent_improvements = [score > reference_score for score in recent_scores] + return not any(recent_improvements) + + def _bin_data(self, X, is_training_data): + """Bin data X. + + If is_training_data, then fit the _bin_mapper attribute. + Else, the binned data is converted to a C-contiguous array. 
+ """ + + description = "training" if is_training_data else "validation" + if self.verbose: + print( + "Binning {:.3f} GB of {} data: ".format(X.nbytes / 1e9, description), + end="", + flush=True, + ) + tic = time() + if is_training_data: + X_binned = self._bin_mapper.fit_transform(X) # F-aligned array + else: + X_binned = self._bin_mapper.transform(X) # F-aligned array + # We convert the array to C-contiguous since predicting is faster + # with this layout (training is faster on F-arrays though) + X_binned = np.ascontiguousarray(X_binned) + toc = time() + if self.verbose: + duration = toc - tic + print("{:.3f} s".format(duration)) + + return X_binned + + def _print_iteration_stats(self, iteration_start_time): + """Print info about the current fitting iteration.""" + log_msg = "" + + predictors_of_ith_iteration = [ + predictors_list + for predictors_list in self._predictors[-1] + if predictors_list + ] + n_trees = len(predictors_of_ith_iteration) + max_depth = max( + predictor.get_max_depth() for predictor in predictors_of_ith_iteration + ) + n_leaves = sum( + predictor.get_n_leaf_nodes() for predictor in predictors_of_ith_iteration + ) + + if n_trees == 1: + log_msg += "{} tree, {} leaves, ".format(n_trees, n_leaves) + else: + log_msg += "{} trees, {} leaves ".format(n_trees, n_leaves) + log_msg += "({} on avg), ".format(int(n_leaves / n_trees)) + + log_msg += "max depth = {}, ".format(max_depth) + + if self.do_early_stopping_: + if self.scoring == "loss": + factor = -1 # score_ arrays contain the negative loss + name = "loss" + else: + factor = 1 + name = "score" + log_msg += "train {}: {:.5f}, ".format(name, factor * self.train_score_[-1]) + if self._use_validation_data: + log_msg += "val {}: {:.5f}, ".format( + name, factor * self.validation_score_[-1] + ) + + iteration_time = time() - iteration_start_time + log_msg += "in {:0.3f}s".format(iteration_time) + + print(log_msg) + + def _raw_predict(self, X, n_threads=None): + """Return the sum of the leaves values over all predictors. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The input samples. + n_threads : int, default=None + Number of OpenMP threads to use. `_openmp_effective_n_threads` is called + to determine the effective number of threads use, which takes cgroups CPU + quotes into account. See the docstring of `_openmp_effective_n_threads` + for details. + + Returns + ------- + raw_predictions : array, shape (n_samples, n_trees_per_iteration) + The raw predicted values. + """ + check_is_fitted(self) + is_binned = getattr(self, "_in_fit", False) + if not is_binned: + X = self._preprocess_X(X, reset=False) + + n_samples = X.shape[0] + raw_predictions = np.zeros( + shape=(n_samples, self.n_trees_per_iteration_), + dtype=self._baseline_prediction.dtype, + order="F", + ) + raw_predictions += self._baseline_prediction + + # We intentionally decouple the number of threads used at prediction + # time from the number of threads used at fit time because the model + # can be deployed on a different machine for prediction purposes. 
+ n_threads = _openmp_effective_n_threads(n_threads) + self._predict_iterations( + X, self._predictors, raw_predictions, is_binned, n_threads + ) + return raw_predictions + + def _predict_iterations(self, X, predictors, raw_predictions, is_binned, n_threads): + """Add the predictions of the predictors to raw_predictions.""" + if not is_binned: + ( + known_cat_bitsets, + f_idx_map, + ) = self._bin_mapper.make_known_categories_bitsets() + + for predictors_of_ith_iteration in predictors: + for k, predictor in enumerate(predictors_of_ith_iteration): + if is_binned: + predict = partial( + predictor.predict_binned, + missing_values_bin_idx=self._bin_mapper.missing_values_bin_idx_, + n_threads=n_threads, + ) + else: + predict = partial( + predictor.predict, + known_cat_bitsets=known_cat_bitsets, + f_idx_map=f_idx_map, + n_threads=n_threads, + ) + raw_predictions[:, k] += predict(X) + + def _staged_raw_predict(self, X): + """Compute raw predictions of ``X`` for each iteration. + + This method allows monitoring (i.e. determine error on testing set) + after each stage. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The input samples. + + Yields + ------ + raw_predictions : generator of ndarray of shape \ + (n_samples, n_trees_per_iteration) + The raw predictions of the input samples. The order of the + classes corresponds to that in the attribute :term:`classes_`. + """ + check_is_fitted(self) + X = self._preprocess_X(X, reset=False) + if X.shape[1] != self._n_features: + raise ValueError( + "X has {} features but this estimator was trained with " + "{} features.".format(X.shape[1], self._n_features) + ) + n_samples = X.shape[0] + raw_predictions = np.zeros( + shape=(n_samples, self.n_trees_per_iteration_), + dtype=self._baseline_prediction.dtype, + order="F", + ) + raw_predictions += self._baseline_prediction + + # We intentionally decouple the number of threads used at prediction + # time from the number of threads used at fit time because the model + # can be deployed on a different machine for prediction purposes. + n_threads = _openmp_effective_n_threads() + for iteration in range(len(self._predictors)): + self._predict_iterations( + X, + self._predictors[iteration : iteration + 1], + raw_predictions, + is_binned=False, + n_threads=n_threads, + ) + yield raw_predictions.copy() + + def _compute_partial_dependence_recursion(self, grid, target_features): + """Fast partial dependence computation. + + Parameters + ---------- + grid : ndarray, shape (n_samples, n_target_features) + The grid points on which the partial dependence should be + evaluated. + target_features : ndarray, shape (n_target_features) + The set of target features for which the partial dependence + should be evaluated. + + Returns + ------- + averaged_predictions : ndarray, shape \ + (n_trees_per_iteration, n_samples) + The value of the partial dependence function on each grid point. 
+ """ + + if getattr(self, "_fitted_with_sw", False): + raise NotImplementedError( + "{} does not support partial dependence " + "plots with the 'recursion' method when " + "sample weights were given during fit " + "time.".format(self.__class__.__name__) + ) + + grid = np.asarray(grid, dtype=X_DTYPE, order="C") + averaged_predictions = np.zeros( + (self.n_trees_per_iteration_, grid.shape[0]), dtype=Y_DTYPE + ) + + for predictors_of_ith_iteration in self._predictors: + for k, predictor in enumerate(predictors_of_ith_iteration): + predictor.compute_partial_dependence( + grid, target_features, averaged_predictions[k] + ) + # Note that the learning rate is already accounted for in the leaves + # values. + + return averaged_predictions + + def _more_tags(self): + return {"allow_nan": True} + + @abstractmethod + def _get_loss(self, sample_weight): + pass + + @abstractmethod + def _encode_y(self, y=None): + pass + + @property + def n_iter_(self): + """Number of iterations of the boosting process.""" + check_is_fitted(self) + return len(self._predictors) + + +class HistGradientBoostingRegressor(RegressorMixin, BaseHistGradientBoosting): + """Histogram-based Gradient Boosting Regression Tree. + + This estimator is much faster than + :class:`GradientBoostingRegressor` + for big datasets (n_samples >= 10 000). + + This estimator has native support for missing values (NaNs). During + training, the tree grower learns at each split point whether samples + with missing values should go to the left or right child, based on the + potential gain. When predicting, samples with missing values are + assigned to the left or right child consequently. If no missing values + were encountered for a given feature during training, then samples with + missing values are mapped to whichever child has the most samples. + + This implementation is inspired by + `LightGBM `_. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.21 + + Parameters + ---------- + loss : {'squared_error', 'absolute_error', 'gamma', 'poisson', 'quantile'}, \ + default='squared_error' + The loss function to use in the boosting process. Note that the + "squared error", "gamma" and "poisson" losses actually implement + "half least squares loss", "half gamma deviance" and "half poisson + deviance" to simplify the computation of the gradient. Furthermore, + "gamma" and "poisson" losses internally use a log-link, "gamma" + requires ``y > 0`` and "poisson" requires ``y >= 0``. + "quantile" uses the pinball loss. + + .. versionchanged:: 0.23 + Added option 'poisson'. + + .. versionchanged:: 1.1 + Added option 'quantile'. + + .. versionchanged:: 1.3 + Added option 'gamma'. + + quantile : float, default=None + If loss is "quantile", this parameter specifies which quantile to be estimated + and must be between 0 and 1. + learning_rate : float, default=0.1 + The learning rate, also known as *shrinkage*. This is used as a + multiplicative factor for the leaves values. Use ``1`` for no + shrinkage. + max_iter : int, default=100 + The maximum number of iterations of the boosting process, i.e. the + maximum number of trees. + max_leaf_nodes : int or None, default=31 + The maximum number of leaves for each tree. Must be strictly greater + than 1. If None, there is no maximum limit. + max_depth : int or None, default=None + The maximum depth of each tree. The depth of a tree is the number of + edges to go from the root to the deepest leaf. + Depth isn't constrained by default. 
+ min_samples_leaf : int, default=20 + The minimum number of samples per leaf. For small datasets with less + than a few hundred samples, it is recommended to lower this value + since only very shallow trees would be built. + l2_regularization : float, default=0 + The L2 regularization parameter. Use ``0`` for no regularization (default). + max_features : float, default=1.0 + Proportion of randomly chosen features in each and every node split. + This is a form of regularization, smaller values make the trees weaker + learners and might prevent overfitting. + If interaction constraints from `interaction_cst` are present, only allowed + features are taken into account for the subsampling. + + .. versionadded:: 1.4 + + max_bins : int, default=255 + The maximum number of bins to use for non-missing values. Before + training, each feature of the input array `X` is binned into + integer-valued bins, which allows for a much faster training stage. + Features with a small number of unique values may use less than + ``max_bins`` bins. In addition to the ``max_bins`` bins, one more bin + is always reserved for missing values. Must be no larger than 255. + categorical_features : array-like of {bool, int, str} of shape (n_features) \ + or shape (n_categorical_features,), default=None + Indicates the categorical features. + + - None : no feature will be considered categorical. + - boolean array-like : boolean mask indicating categorical features. + - integer array-like : integer indices indicating categorical + features. + - str array-like: names of categorical features (assuming the training + data has feature names). + - `"from_dtype"`: dataframe columns with dtype "category" are + considered to be categorical features. The input must be an object + exposing a ``__dataframe__`` method such as pandas or polars + DataFrames to use this feature. + + For each categorical feature, there must be at most `max_bins` unique + categories. Negative values for categorical features encoded as numeric + dtypes are treated as missing values. All categorical values are + converted to floating point numbers. This means that categorical values + of 1.0 and 1 are treated as the same category. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.24 + + .. versionchanged:: 1.2 + Added support for feature names. + + .. versionchanged:: 1.4 + Added `"from_dtype"` option. The default will change to `"from_dtype"` in + v1.6. + + monotonic_cst : array-like of int of shape (n_features) or dict, default=None + Monotonic constraint to enforce on each feature are specified using the + following integer values: + + - 1: monotonic increase + - 0: no constraint + - -1: monotonic decrease + + If a dict with str keys, map feature to monotonic constraints by name. + If an array, the features are mapped to constraints by position. See + :ref:`monotonic_cst_features_names` for a usage example. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.23 + + .. versionchanged:: 1.2 + Accept dict of constraints with feature names as keys. + + interaction_cst : {"pairwise", "no_interactions"} or sequence of lists/tuples/sets \ + of int, default=None + Specify interaction constraints, the sets of features which can + interact with each other in child node splits. + + Each item specifies the set of feature indices that are allowed + to interact with each other. If there are more features than + specified in these constraints, they are treated as if they were + specified as an additional set. 
+ + The strings "pairwise" and "no_interactions" are shorthands for + allowing only pairwise or no interactions, respectively. + + For instance, with 5 features in total, `interaction_cst=[{0, 1}]` + is equivalent to `interaction_cst=[{0, 1}, {2, 3, 4}]`, + and specifies that each branch of a tree will either only split + on features 0 and 1 or only split on features 2, 3 and 4. + + .. versionadded:: 1.2 + + warm_start : bool, default=False + When set to ``True``, reuse the solution of the previous call to fit + and add more estimators to the ensemble. For results to be valid, the + estimator should be re-trained on the same data only. + See :term:`the Glossary `. + early_stopping : 'auto' or bool, default='auto' + If 'auto', early stopping is enabled if the sample size is larger than + 10000. If True, early stopping is enabled, otherwise early stopping is + disabled. + + .. versionadded:: 0.23 + + scoring : str or callable or None, default='loss' + Scoring parameter to use for early stopping. It can be a single + string (see :ref:`scoring_parameter`) or a callable (see + :ref:`scoring`). If None, the estimator's default scorer is used. If + ``scoring='loss'``, early stopping is checked w.r.t the loss value. + Only used if early stopping is performed. + validation_fraction : int or float or None, default=0.1 + Proportion (or absolute size) of training data to set aside as + validation data for early stopping. If None, early stopping is done on + the training data. Only used if early stopping is performed. + n_iter_no_change : int, default=10 + Used to determine when to "early stop". The fitting process is + stopped when none of the last ``n_iter_no_change`` scores are better + than the ``n_iter_no_change - 1`` -th-to-last one, up to some + tolerance. Only used if early stopping is performed. + tol : float, default=1e-7 + The absolute tolerance to use when comparing scores during early + stopping. The higher the tolerance, the more likely we are to early + stop: higher tolerance means that it will be harder for subsequent + iterations to be considered an improvement upon the reference score. + verbose : int, default=0 + The verbosity level. If not zero, print some information about the + fitting process. + random_state : int, RandomState instance or None, default=None + Pseudo-random number generator to control the subsampling in the + binning process, and the train/validation data split if early stopping + is enabled. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + Attributes + ---------- + do_early_stopping_ : bool + Indicates whether early stopping is used during training. + n_iter_ : int + The number of iterations as selected by early stopping, depending on + the `early_stopping` parameter. Otherwise it corresponds to max_iter. + n_trees_per_iteration_ : int + The number of tree that are built at each iteration. For regressors, + this is always 1. + train_score_ : ndarray, shape (n_iter_+1,) + The scores at each iteration on the training data. The first entry + is the score of the ensemble before the first iteration. Scores are + computed according to the ``scoring`` parameter. If ``scoring`` is + not 'loss', scores are computed on a subset of at most 10 000 + samples. Empty if no early stopping. + validation_score_ : ndarray, shape (n_iter_+1,) + The scores at each iteration on the held-out validation data. The + first entry is the score of the ensemble before the first iteration. 
+ Scores are computed according to the ``scoring`` parameter. Empty if + no early stopping or if ``validation_fraction`` is None. + is_categorical_ : ndarray, shape (n_features, ) or None + Boolean mask for the categorical features. ``None`` if there are no + categorical features. + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + GradientBoostingRegressor : Exact gradient boosting method that does not + scale as good on datasets with a large number of samples. + sklearn.tree.DecisionTreeRegressor : A decision tree regressor. + RandomForestRegressor : A meta-estimator that fits a number of decision + tree regressors on various sub-samples of the dataset and uses + averaging to improve the statistical performance and control + over-fitting. + AdaBoostRegressor : A meta-estimator that begins by fitting a regressor + on the original dataset and then fits additional copies of the + regressor on the same dataset but where the weights of instances are + adjusted according to the error of the current prediction. As such, + subsequent regressors focus more on difficult cases. + + Examples + -------- + >>> from sklearn.ensemble import HistGradientBoostingRegressor + >>> from sklearn.datasets import load_diabetes + >>> X, y = load_diabetes(return_X_y=True) + >>> est = HistGradientBoostingRegressor().fit(X, y) + >>> est.score(X, y) + 0.92... + """ + + _parameter_constraints: dict = { + **BaseHistGradientBoosting._parameter_constraints, + "loss": [ + StrOptions( + { + "squared_error", + "absolute_error", + "poisson", + "gamma", + "quantile", + } + ), + BaseLoss, + ], + "quantile": [Interval(Real, 0, 1, closed="both"), None], + } + + def __init__( + self, + loss="squared_error", + *, + quantile=None, + learning_rate=0.1, + max_iter=100, + max_leaf_nodes=31, + max_depth=None, + min_samples_leaf=20, + l2_regularization=0.0, + max_features=1.0, + max_bins=255, + categorical_features="warn", + monotonic_cst=None, + interaction_cst=None, + warm_start=False, + early_stopping="auto", + scoring="loss", + validation_fraction=0.1, + n_iter_no_change=10, + tol=1e-7, + verbose=0, + random_state=None, + ): + super(HistGradientBoostingRegressor, self).__init__( + loss=loss, + learning_rate=learning_rate, + max_iter=max_iter, + max_leaf_nodes=max_leaf_nodes, + max_depth=max_depth, + min_samples_leaf=min_samples_leaf, + l2_regularization=l2_regularization, + max_features=max_features, + max_bins=max_bins, + monotonic_cst=monotonic_cst, + interaction_cst=interaction_cst, + categorical_features=categorical_features, + early_stopping=early_stopping, + warm_start=warm_start, + scoring=scoring, + validation_fraction=validation_fraction, + n_iter_no_change=n_iter_no_change, + tol=tol, + verbose=verbose, + random_state=random_state, + ) + self.quantile = quantile + + def predict(self, X): + """Predict values for X. + + Parameters + ---------- + X : array-like, shape (n_samples, n_features) + The input samples. + + Returns + ------- + y : ndarray, shape (n_samples,) + The predicted values. + """ + check_is_fitted(self) + # Return inverse link of raw predictions after converting + # shape (n_samples, 1) to (n_samples,) + return self._loss.link.inverse(self._raw_predict(X).ravel()) + + def staged_predict(self, X): + """Predict regression target for each iteration. 
+ + This method allows monitoring (i.e. determine error on testing set) + after each stage. + + .. versionadded:: 0.24 + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The input samples. + + Yields + ------ + y : generator of ndarray of shape (n_samples,) + The predicted values of the input samples, for each iteration. + """ + for raw_predictions in self._staged_raw_predict(X): + yield self._loss.link.inverse(raw_predictions.ravel()) + + def _encode_y(self, y): + # Just convert y to the expected dtype + self.n_trees_per_iteration_ = 1 + y = y.astype(Y_DTYPE, copy=False) + if self.loss == "gamma": + # Ensure y > 0 + if not np.all(y > 0): + raise ValueError("loss='gamma' requires strictly positive y.") + elif self.loss == "poisson": + # Ensure y >= 0 and sum(y) > 0 + if not (np.all(y >= 0) and np.sum(y) > 0): + raise ValueError( + "loss='poisson' requires non-negative y and sum(y) > 0." + ) + return y + + def _get_loss(self, sample_weight): + if self.loss == "quantile": + return _LOSSES[self.loss]( + sample_weight=sample_weight, quantile=self.quantile + ) + else: + return _LOSSES[self.loss](sample_weight=sample_weight) + + +class HistGradientBoostingClassifier(ClassifierMixin, BaseHistGradientBoosting): + """Histogram-based Gradient Boosting Classification Tree. + + This estimator is much faster than + :class:`GradientBoostingClassifier` + for big datasets (n_samples >= 10 000). + + This estimator has native support for missing values (NaNs). During + training, the tree grower learns at each split point whether samples + with missing values should go to the left or right child, based on the + potential gain. When predicting, samples with missing values are + assigned to the left or right child consequently. If no missing values + were encountered for a given feature during training, then samples with + missing values are mapped to whichever child has the most samples. + + This implementation is inspired by + `LightGBM `_. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.21 + + Parameters + ---------- + loss : {'log_loss'}, default='log_loss' + The loss function to use in the boosting process. + + For binary classification problems, 'log_loss' is also known as logistic loss, + binomial deviance or binary crossentropy. Internally, the model fits one tree + per boosting iteration and uses the logistic sigmoid function (expit) as + inverse link function to compute the predicted positive class probability. + + For multiclass classification problems, 'log_loss' is also known as multinomial + deviance or categorical crossentropy. Internally, the model fits one tree per + boosting iteration and per class and uses the softmax function as inverse link + function to compute the predicted probabilities of the classes. + + learning_rate : float, default=0.1 + The learning rate, also known as *shrinkage*. This is used as a + multiplicative factor for the leaves values. Use ``1`` for no + shrinkage. + max_iter : int, default=100 + The maximum number of iterations of the boosting process, i.e. the + maximum number of trees for binary classification. For multiclass + classification, `n_classes` trees per iteration are built. + max_leaf_nodes : int or None, default=31 + The maximum number of leaves for each tree. Must be strictly greater + than 1. If None, there is no maximum limit. + max_depth : int or None, default=None + The maximum depth of each tree. The depth of a tree is the number of + edges to go from the root to the deepest leaf. 
+ Depth isn't constrained by default. + min_samples_leaf : int, default=20 + The minimum number of samples per leaf. For small datasets with less + than a few hundred samples, it is recommended to lower this value + since only very shallow trees would be built. + l2_regularization : float, default=0 + The L2 regularization parameter. Use ``0`` for no regularization (default). + max_features : float, default=1.0 + Proportion of randomly chosen features in each and every node split. + This is a form of regularization, smaller values make the trees weaker + learners and might prevent overfitting. + If interaction constraints from `interaction_cst` are present, only allowed + features are taken into account for the subsampling. + + .. versionadded:: 1.4 + + max_bins : int, default=255 + The maximum number of bins to use for non-missing values. Before + training, each feature of the input array `X` is binned into + integer-valued bins, which allows for a much faster training stage. + Features with a small number of unique values may use less than + ``max_bins`` bins. In addition to the ``max_bins`` bins, one more bin + is always reserved for missing values. Must be no larger than 255. + categorical_features : array-like of {bool, int, str} of shape (n_features) \ + or shape (n_categorical_features,), default=None + Indicates the categorical features. + + - None : no feature will be considered categorical. + - boolean array-like : boolean mask indicating categorical features. + - integer array-like : integer indices indicating categorical + features. + - str array-like: names of categorical features (assuming the training + data has feature names). + - `"from_dtype"`: dataframe columns with dtype "category" are + considered to be categorical features. The input must be an object + exposing a ``__dataframe__`` method such as pandas or polars + DataFrames to use this feature. + + For each categorical feature, there must be at most `max_bins` unique + categories. Negative values for categorical features encoded as numeric + dtypes are treated as missing values. All categorical values are + converted to floating point numbers. This means that categorical values + of 1.0 and 1 are treated as the same category. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.24 + + .. versionchanged:: 1.2 + Added support for feature names. + + .. versionchanged:: 1.4 + Added `"from_dtype"` option. The default will change to `"from_dtype"` in + v1.6. + + monotonic_cst : array-like of int of shape (n_features) or dict, default=None + Monotonic constraint to enforce on each feature are specified using the + following integer values: + + - 1: monotonic increase + - 0: no constraint + - -1: monotonic decrease + + If a dict with str keys, map feature to monotonic constraints by name. + If an array, the features are mapped to constraints by position. See + :ref:`monotonic_cst_features_names` for a usage example. + + The constraints are only valid for binary classifications and hold + over the probability of the positive class. + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.23 + + .. versionchanged:: 1.2 + Accept dict of constraints with feature names as keys. + + interaction_cst : {"pairwise", "no_interactions"} or sequence of lists/tuples/sets \ + of int, default=None + Specify interaction constraints, the sets of features which can + interact with each other in child node splits. + + Each item specifies the set of feature indices that are allowed + to interact with each other. 
If there are more features than + specified in these constraints, they are treated as if they were + specified as an additional set. + + The strings "pairwise" and "no_interactions" are shorthands for + allowing only pairwise or no interactions, respectively. + + For instance, with 5 features in total, `interaction_cst=[{0, 1}]` + is equivalent to `interaction_cst=[{0, 1}, {2, 3, 4}]`, + and specifies that each branch of a tree will either only split + on features 0 and 1 or only split on features 2, 3 and 4. + + .. versionadded:: 1.2 + + warm_start : bool, default=False + When set to ``True``, reuse the solution of the previous call to fit + and add more estimators to the ensemble. For results to be valid, the + estimator should be re-trained on the same data only. + See :term:`the Glossary `. + early_stopping : 'auto' or bool, default='auto' + If 'auto', early stopping is enabled if the sample size is larger than + 10000. If True, early stopping is enabled, otherwise early stopping is + disabled. + + .. versionadded:: 0.23 + + scoring : str or callable or None, default='loss' + Scoring parameter to use for early stopping. It can be a single + string (see :ref:`scoring_parameter`) or a callable (see + :ref:`scoring`). If None, the estimator's default scorer + is used. If ``scoring='loss'``, early stopping is checked + w.r.t the loss value. Only used if early stopping is performed. + validation_fraction : int or float or None, default=0.1 + Proportion (or absolute size) of training data to set aside as + validation data for early stopping. If None, early stopping is done on + the training data. Only used if early stopping is performed. + n_iter_no_change : int, default=10 + Used to determine when to "early stop". The fitting process is + stopped when none of the last ``n_iter_no_change`` scores are better + than the ``n_iter_no_change - 1`` -th-to-last one, up to some + tolerance. Only used if early stopping is performed. + tol : float, default=1e-7 + The absolute tolerance to use when comparing scores. The higher the + tolerance, the more likely we are to early stop: higher tolerance + means that it will be harder for subsequent iterations to be + considered an improvement upon the reference score. + verbose : int, default=0 + The verbosity level. If not zero, print some information about the + fitting process. + random_state : int, RandomState instance or None, default=None + Pseudo-random number generator to control the subsampling in the + binning process, and the train/validation data split if early stopping + is enabled. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + class_weight : dict or 'balanced', default=None + Weights associated with classes in the form `{class_label: weight}`. + If not given, all classes are supposed to have weight one. + The "balanced" mode uses the values of y to automatically adjust + weights inversely proportional to class frequencies in the input data + as `n_samples / (n_classes * np.bincount(y))`. + Note that these weights will be multiplied with sample_weight (passed + through the fit method) if `sample_weight` is specified. + + .. versionadded:: 1.2 + + Attributes + ---------- + classes_ : array, shape = (n_classes,) + Class labels. + do_early_stopping_ : bool + Indicates whether early stopping is used during training. + n_iter_ : int + The number of iterations as selected by early stopping, depending on + the `early_stopping` parameter. Otherwise it corresponds to max_iter. 
+ n_trees_per_iteration_ : int + The number of trees that are built at each iteration. This is equal to 1 + for binary classification, and to ``n_classes`` for multiclass + classification. + train_score_ : ndarray, shape (n_iter_+1,) + The scores at each iteration on the training data. The first entry + is the score of the ensemble before the first iteration. Scores are + computed according to the ``scoring`` parameter. If ``scoring`` is + not 'loss', scores are computed on a subset of at most 10 000 + samples. Empty if no early stopping. + validation_score_ : ndarray, shape (n_iter_+1,) + The scores at each iteration on the held-out validation data. The + first entry is the score of the ensemble before the first iteration. + Scores are computed according to the ``scoring`` parameter. Empty if + no early stopping or if ``validation_fraction`` is None. + is_categorical_ : ndarray, shape (n_features, ) or None + Boolean mask for the categorical features. ``None`` if there are no + categorical features. + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + GradientBoostingClassifier : Exact gradient boosting method that does not + scale as well on datasets with a large number of samples. + sklearn.tree.DecisionTreeClassifier : A decision tree classifier. + RandomForestClassifier : A meta-estimator that fits a number of decision + tree classifiers on various sub-samples of the dataset and uses + averaging to improve the predictive accuracy and control over-fitting. + AdaBoostClassifier : A meta-estimator that begins by fitting a classifier + on the original dataset and then fits additional copies of the + classifier on the same dataset where the weights of incorrectly + classified instances are adjusted such that subsequent classifiers + focus more on difficult cases.
+ + Examples + -------- + >>> from sklearn.ensemble import HistGradientBoostingClassifier + >>> from sklearn.datasets import load_iris + >>> X, y = load_iris(return_X_y=True) + >>> clf = HistGradientBoostingClassifier().fit(X, y) + >>> clf.score(X, y) + 1.0 + """ + + _parameter_constraints: dict = { + **BaseHistGradientBoosting._parameter_constraints, + "loss": [StrOptions({"log_loss"}), BaseLoss], + "class_weight": [dict, StrOptions({"balanced"}), None], + } + + def __init__( + self, + loss="log_loss", + *, + learning_rate=0.1, + max_iter=100, + max_leaf_nodes=31, + max_depth=None, + min_samples_leaf=20, + l2_regularization=0.0, + max_features=1.0, + max_bins=255, + categorical_features="warn", + monotonic_cst=None, + interaction_cst=None, + warm_start=False, + early_stopping="auto", + scoring="loss", + validation_fraction=0.1, + n_iter_no_change=10, + tol=1e-7, + verbose=0, + random_state=None, + class_weight=None, + ): + super(HistGradientBoostingClassifier, self).__init__( + loss=loss, + learning_rate=learning_rate, + max_iter=max_iter, + max_leaf_nodes=max_leaf_nodes, + max_depth=max_depth, + min_samples_leaf=min_samples_leaf, + l2_regularization=l2_regularization, + max_features=max_features, + max_bins=max_bins, + categorical_features=categorical_features, + monotonic_cst=monotonic_cst, + interaction_cst=interaction_cst, + warm_start=warm_start, + early_stopping=early_stopping, + scoring=scoring, + validation_fraction=validation_fraction, + n_iter_no_change=n_iter_no_change, + tol=tol, + verbose=verbose, + random_state=random_state, + ) + self.class_weight = class_weight + + def _finalize_sample_weight(self, sample_weight, y): + """Adjust sample_weights with class_weights.""" + if self.class_weight is None: + return sample_weight + + expanded_class_weight = compute_sample_weight(self.class_weight, y) + + if sample_weight is not None: + return sample_weight * expanded_class_weight + else: + return expanded_class_weight + + def predict(self, X): + """Predict classes for X. + + Parameters + ---------- + X : array-like, shape (n_samples, n_features) + The input samples. + + Returns + ------- + y : ndarray, shape (n_samples,) + The predicted classes. + """ + # TODO: This could be done in parallel + encoded_classes = np.argmax(self.predict_proba(X), axis=1) + return self.classes_[encoded_classes] + + def staged_predict(self, X): + """Predict classes at each iteration. + + This method allows monitoring (i.e. determine error on testing set) + after each stage. + + .. versionadded:: 0.24 + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The input samples. + + Yields + ------ + y : generator of ndarray of shape (n_samples,) + The predicted classes of the input samples, for each iteration. + """ + for proba in self.staged_predict_proba(X): + encoded_classes = np.argmax(proba, axis=1) + yield self.classes_.take(encoded_classes, axis=0) + + def predict_proba(self, X): + """Predict class probabilities for X. + + Parameters + ---------- + X : array-like, shape (n_samples, n_features) + The input samples. + + Returns + ------- + p : ndarray, shape (n_samples, n_classes) + The class probabilities of the input samples. + """ + raw_predictions = self._raw_predict(X) + return self._loss.predict_proba(raw_predictions) + + def staged_predict_proba(self, X): + """Predict class probabilities at each iteration. + + This method allows monitoring (i.e. determine error on testing set) + after each stage. 
+ + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The input samples. + + Yields + ------ + y : generator of ndarray of shape (n_samples,) + The predicted class probabilities of the input samples, + for each iteration. + """ + for raw_predictions in self._staged_raw_predict(X): + yield self._loss.predict_proba(raw_predictions) + + def decision_function(self, X): + """Compute the decision function of ``X``. + + Parameters + ---------- + X : array-like, shape (n_samples, n_features) + The input samples. + + Returns + ------- + decision : ndarray, shape (n_samples,) or \ + (n_samples, n_trees_per_iteration) + The raw predicted values (i.e. the sum of the trees' leaves) for + each sample. n_trees_per_iteration is equal to the number of + classes in multiclass classification. + """ + decision = self._raw_predict(X) + if decision.shape[1] == 1: + decision = decision.ravel() + return decision + + def staged_decision_function(self, X): + """Compute decision function of ``X`` for each iteration. + + This method allows monitoring (i.e. determine error on testing set) + after each stage. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The input samples. + + Yields + ------ + decision : generator of ndarray of shape (n_samples,) or \ + (n_samples, n_trees_per_iteration) + The decision function of the input samples, which corresponds to + the raw values predicted from the trees of the ensemble. The + classes correspond to those in the attribute :term:`classes_`. + """ + for staged_decision in self._staged_raw_predict(X): + if staged_decision.shape[1] == 1: + staged_decision = staged_decision.ravel() + yield staged_decision + + def _encode_y(self, y): + # encode classes into 0 ... n_classes - 1 and set the attributes classes_ + # and n_trees_per_iteration_ + check_classification_targets(y) + + label_encoder = LabelEncoder() + encoded_y = label_encoder.fit_transform(y) + self.classes_ = label_encoder.classes_ + n_classes = self.classes_.shape[0] + # only 1 tree for binary classification. For multiclass classification, + # we build 1 tree per class.
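+        # For example, a problem with 3 classes builds 3 trees per boosting
+        # iteration, i.e. at most 3 * max_iter trees in total.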
+ self.n_trees_per_iteration_ = 1 if n_classes <= 2 else n_classes + encoded_y = encoded_y.astype(Y_DTYPE, copy=False) + return encoded_y + + def _get_loss(self, sample_weight): + # At this point self.loss == "log_loss" + if self.n_trees_per_iteration_ == 1: + return HalfBinomialLoss(sample_weight=sample_weight) + else: + return HalfMultinomialLoss( + sample_weight=sample_weight, n_classes=self.n_trees_per_iteration_ + ) diff --git a/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/histogram.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/histogram.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..fb80f130e13465612d81f03ff21e735c9a5c6ca6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/histogram.cpython-310-x86_64-linux-gnu.so differ diff --git a/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/predictor.py b/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/predictor.py new file mode 100644 index 0000000000000000000000000000000000000000..b939712d18893600a164395c7ffc735903c9f332 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/predictor.py @@ -0,0 +1,144 @@ +""" +This module contains the TreePredictor class which is used for prediction. +""" +# Author: Nicolas Hug + +import numpy as np + +from ._predictor import ( + _compute_partial_dependence, + _predict_from_binned_data, + _predict_from_raw_data, +) +from .common import PREDICTOR_RECORD_DTYPE, Y_DTYPE + + +class TreePredictor: + """Tree class used for predictions. + + Parameters + ---------- + nodes : ndarray of PREDICTOR_RECORD_DTYPE + The nodes of the tree. + binned_left_cat_bitsets : ndarray of shape (n_categorical_splits, 8), dtype=uint32 + Array of bitsets for binned categories used in predict_binned when a + split is categorical. + raw_left_cat_bitsets : ndarray of shape (n_categorical_splits, 8), dtype=uint32 + Array of bitsets for raw categories used in predict when a split is + categorical. + """ + + def __init__(self, nodes, binned_left_cat_bitsets, raw_left_cat_bitsets): + self.nodes = nodes + self.binned_left_cat_bitsets = binned_left_cat_bitsets + self.raw_left_cat_bitsets = raw_left_cat_bitsets + + def get_n_leaf_nodes(self): + """Return number of leaves.""" + return int(self.nodes["is_leaf"].sum()) + + def get_max_depth(self): + """Return maximum depth among all leaves.""" + return int(self.nodes["depth"].max()) + + def predict(self, X, known_cat_bitsets, f_idx_map, n_threads): + """Predict raw values for non-binned data. + + Parameters + ---------- + X : ndarray, shape (n_samples, n_features) + The input samples. + + known_cat_bitsets : ndarray of shape (n_categorical_features, 8) + Array of bitsets of known categories, for each categorical feature. + + f_idx_map : ndarray of shape (n_features,) + Map from original feature index to the corresponding index in the + known_cat_bitsets array. + + n_threads : int + Number of OpenMP threads to use. + + Returns + ------- + y : ndarray, shape (n_samples,) + The raw predicted values. + """ + out = np.empty(X.shape[0], dtype=Y_DTYPE) + + _predict_from_raw_data( + self.nodes, + X, + self.raw_left_cat_bitsets, + known_cat_bitsets, + f_idx_map, + n_threads, + out, + ) + return out + + def predict_binned(self, X, missing_values_bin_idx, n_threads): + """Predict raw values for binned data. 
+ + Parameters + ---------- + X : ndarray, shape (n_samples, n_features) + The input samples. + missing_values_bin_idx : uint8 + Index of the bin that is used for missing values. This is the + index of the last bin and is always equal to max_bins (as passed + to the GBDT classes), or equivalently to n_bins - 1. + n_threads : int + Number of OpenMP threads to use. + + Returns + ------- + y : ndarray, shape (n_samples,) + The raw predicted values. + """ + out = np.empty(X.shape[0], dtype=Y_DTYPE) + _predict_from_binned_data( + self.nodes, + X, + self.binned_left_cat_bitsets, + missing_values_bin_idx, + n_threads, + out, + ) + return out + + def compute_partial_dependence(self, grid, target_features, out): + """Fast partial dependence computation. + + Parameters + ---------- + grid : ndarray, shape (n_samples, n_target_features) + The grid points on which the partial dependence should be + evaluated. + target_features : ndarray, shape (n_target_features) + The set of target features for which the partial dependence + should be evaluated. + out : ndarray, shape (n_samples) + The value of the partial dependence function on each grid + point. + """ + _compute_partial_dependence(self.nodes, grid, target_features, out) + + def __setstate__(self, state): + try: + super().__setstate__(state) + except AttributeError: + self.__dict__.update(state) + + # The dtype of feature_idx is np.intp which is platform dependent. Here, we + # make sure that saving and loading on different bitness systems works without + # errors. For instance, on a 64 bit Python runtime, np.intp = np.int64, + # while on 32 bit np.intp = np.int32. + # + # TODO: consider always using platform agnostic dtypes for fitted + # estimator attributes. For this particular estimator, this would + # mean replacing the intp field of PREDICTOR_RECORD_DTYPE by an int32 + # field. Ideally this should be done consistently throughout + # scikit-learn along with a common test. 
+ if self.nodes.dtype != PREDICTOR_RECORD_DTYPE: + self.nodes = self.nodes.astype(PREDICTOR_RECORD_DTYPE, casting="same_kind") diff --git a/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/splitting.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/splitting.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..6014e7dc2b15c6c88a79e635c0b43a41603ea93a Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/splitting.cpython-310-x86_64-linux-gnu.so differ diff --git a/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__init__.py b/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ef5cd66c9e1e16fc3ab14b61dc22b2191e83a461 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_binning.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_binning.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6c9e0d2c76112b36138ecca68093eef08ad60636 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_binning.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_bitset.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_bitset.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..45d5ee6c4679ecf3e17d81a2d4c553dcbf5d6289 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_bitset.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_compare_lightgbm.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_compare_lightgbm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9a2d1890f472a12e299db195aca245d26e989354 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_compare_lightgbm.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_gradient_boosting.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_gradient_boosting.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..76caf8203ded09235a03b489529fba7252a166d9 Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_gradient_boosting.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_grower.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_grower.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..abbf6e55ff73e1828e2e3826ce3d1e603a9c0b2c Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_grower.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_histogram.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_histogram.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ac2326e2e7fcf0773823adceb0f01497248180f5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_histogram.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_monotonic_contraints.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_monotonic_contraints.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e7fa9b64711b9b2f55dbeba178bc03a1cadebe01 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_monotonic_contraints.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_predictor.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_predictor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2b4fda6d811c0c9306822031de34cefa87f44f1a Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_predictor.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_splitting.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_splitting.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b8f6bc622625786861e2dd56bf3782517325d252 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_splitting.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_warm_start.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_warm_start.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..99066676a476be20ac0eb5180f8d2eb0c672a856 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/__pycache__/test_warm_start.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/test_binning.py 
b/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/test_binning.py new file mode 100644 index 0000000000000000000000000000000000000000..6f9fcd0057141a398611ff94d528b1317ba4a0fc --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/test_binning.py @@ -0,0 +1,489 @@ +import numpy as np +import pytest +from numpy.testing import assert_allclose, assert_array_equal + +from sklearn.ensemble._hist_gradient_boosting.binning import ( + _BinMapper, + _find_binning_thresholds, + _map_to_bins, +) +from sklearn.ensemble._hist_gradient_boosting.common import ( + ALMOST_INF, + X_BINNED_DTYPE, + X_DTYPE, +) +from sklearn.utils._openmp_helpers import _openmp_effective_n_threads + +n_threads = _openmp_effective_n_threads() + + +DATA = ( + np.random.RandomState(42) + .normal(loc=[0, 10], scale=[1, 0.01], size=(int(1e6), 2)) + .astype(X_DTYPE) +) + + +def test_find_binning_thresholds_regular_data(): + data = np.linspace(0, 10, 1001) + bin_thresholds = _find_binning_thresholds(data, max_bins=10) + assert_allclose(bin_thresholds, [1, 2, 3, 4, 5, 6, 7, 8, 9]) + + bin_thresholds = _find_binning_thresholds(data, max_bins=5) + assert_allclose(bin_thresholds, [2, 4, 6, 8]) + + +def test_find_binning_thresholds_small_regular_data(): + data = np.linspace(0, 10, 11) + + bin_thresholds = _find_binning_thresholds(data, max_bins=5) + assert_allclose(bin_thresholds, [2, 4, 6, 8]) + + bin_thresholds = _find_binning_thresholds(data, max_bins=10) + assert_allclose(bin_thresholds, [1, 2, 3, 4, 5, 6, 7, 8, 9]) + + bin_thresholds = _find_binning_thresholds(data, max_bins=11) + assert_allclose(bin_thresholds, np.arange(10) + 0.5) + + bin_thresholds = _find_binning_thresholds(data, max_bins=255) + assert_allclose(bin_thresholds, np.arange(10) + 0.5) + + +def test_find_binning_thresholds_random_data(): + bin_thresholds = [ + _find_binning_thresholds(DATA[:, i], max_bins=255) for i in range(2) + ] + for i in range(len(bin_thresholds)): + assert bin_thresholds[i].shape == (254,) # 255 - 1 + assert bin_thresholds[i].dtype == DATA.dtype + + assert_allclose( + bin_thresholds[0][[64, 128, 192]], np.array([-0.7, 0.0, 0.7]), atol=1e-1 + ) + + assert_allclose( + bin_thresholds[1][[64, 128, 192]], np.array([9.99, 10.00, 10.01]), atol=1e-2 + ) + + +def test_find_binning_thresholds_low_n_bins(): + bin_thresholds = [ + _find_binning_thresholds(DATA[:, i], max_bins=128) for i in range(2) + ] + for i in range(len(bin_thresholds)): + assert bin_thresholds[i].shape == (127,) # 128 - 1 + assert bin_thresholds[i].dtype == DATA.dtype + + +@pytest.mark.parametrize("n_bins", (2, 257)) +def test_invalid_n_bins(n_bins): + err_msg = "n_bins={} should be no smaller than 3 and no larger than 256".format( + n_bins + ) + with pytest.raises(ValueError, match=err_msg): + _BinMapper(n_bins=n_bins).fit(DATA) + + +def test_bin_mapper_n_features_transform(): + mapper = _BinMapper(n_bins=42, random_state=42).fit(DATA) + err_msg = "This estimator was fitted with 2 features but 4 got passed" + with pytest.raises(ValueError, match=err_msg): + mapper.transform(np.repeat(DATA, 2, axis=1)) + + +@pytest.mark.parametrize("max_bins", [16, 128, 255]) +def test_map_to_bins(max_bins): + bin_thresholds = [ + _find_binning_thresholds(DATA[:, i], max_bins=max_bins) for i in range(2) + ] + binned = np.zeros_like(DATA, dtype=X_BINNED_DTYPE, order="F") + is_categorical = np.zeros(2, dtype=np.uint8) + last_bin_idx = max_bins + _map_to_bins(DATA, bin_thresholds, is_categorical, last_bin_idx, n_threads, 
binned) + assert binned.shape == DATA.shape + assert binned.dtype == np.uint8 + assert binned.flags.f_contiguous + + min_indices = DATA.argmin(axis=0) + max_indices = DATA.argmax(axis=0) + + for feature_idx, min_idx in enumerate(min_indices): + assert binned[min_idx, feature_idx] == 0 + for feature_idx, max_idx in enumerate(max_indices): + assert binned[max_idx, feature_idx] == max_bins - 1 + + +@pytest.mark.parametrize("max_bins", [5, 10, 42]) +def test_bin_mapper_random_data(max_bins): + n_samples, n_features = DATA.shape + + expected_count_per_bin = n_samples // max_bins + tol = int(0.05 * expected_count_per_bin) + + # max_bins is the number of bins for non-missing values + n_bins = max_bins + 1 + mapper = _BinMapper(n_bins=n_bins, random_state=42).fit(DATA) + binned = mapper.transform(DATA) + + assert binned.shape == (n_samples, n_features) + assert binned.dtype == np.uint8 + assert_array_equal(binned.min(axis=0), np.array([0, 0])) + assert_array_equal(binned.max(axis=0), np.array([max_bins - 1, max_bins - 1])) + assert len(mapper.bin_thresholds_) == n_features + for bin_thresholds_feature in mapper.bin_thresholds_: + assert bin_thresholds_feature.shape == (max_bins - 1,) + assert bin_thresholds_feature.dtype == DATA.dtype + assert np.all(mapper.n_bins_non_missing_ == max_bins) + + # Check that the binned data is approximately balanced across bins. + for feature_idx in range(n_features): + for bin_idx in range(max_bins): + count = (binned[:, feature_idx] == bin_idx).sum() + assert abs(count - expected_count_per_bin) < tol + + +@pytest.mark.parametrize("n_samples, max_bins", [(5, 5), (5, 10), (5, 11), (42, 255)]) +def test_bin_mapper_small_random_data(n_samples, max_bins): + data = np.random.RandomState(42).normal(size=n_samples).reshape(-1, 1) + assert len(np.unique(data)) == n_samples + + # max_bins is the number of bins for non-missing values + n_bins = max_bins + 1 + mapper = _BinMapper(n_bins=n_bins, random_state=42) + binned = mapper.fit_transform(data) + + assert binned.shape == data.shape + assert binned.dtype == np.uint8 + assert_array_equal(binned.ravel()[np.argsort(data.ravel())], np.arange(n_samples)) + + +@pytest.mark.parametrize( + "max_bins, n_distinct, multiplier", + [ + (5, 5, 1), + (5, 5, 3), + (255, 12, 42), + ], +) +def test_bin_mapper_identity_repeated_values(max_bins, n_distinct, multiplier): + data = np.array(list(range(n_distinct)) * multiplier).reshape(-1, 1) + # max_bins is the number of bins for non-missing values + n_bins = max_bins + 1 + binned = _BinMapper(n_bins=n_bins).fit_transform(data) + assert_array_equal(data, binned) + + +@pytest.mark.parametrize("n_distinct", [2, 7, 42]) +def test_bin_mapper_repeated_values_invariance(n_distinct): + rng = np.random.RandomState(42) + distinct_values = rng.normal(size=n_distinct) + assert len(np.unique(distinct_values)) == n_distinct + + repeated_indices = rng.randint(low=0, high=n_distinct, size=1000) + data = distinct_values[repeated_indices] + rng.shuffle(data) + assert_array_equal(np.unique(data), np.sort(distinct_values)) + + data = data.reshape(-1, 1) + + mapper_1 = _BinMapper(n_bins=n_distinct + 1) + binned_1 = mapper_1.fit_transform(data) + assert_array_equal(np.unique(binned_1[:, 0]), np.arange(n_distinct)) + + # Adding more bins to the mapper yields the same results (same thresholds) + mapper_2 = _BinMapper(n_bins=min(256, n_distinct * 3) + 1) + binned_2 = mapper_2.fit_transform(data) + + assert_allclose(mapper_1.bin_thresholds_[0], mapper_2.bin_thresholds_[0]) + assert_array_equal(binned_1, binned_2) + + 
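The threshold values asserted by the binning tests above follow a quantile-style scheme: for max_bins bins, max_bins - 1 cut points are placed so that roughly the same number of samples falls into each bin. A rough NumPy-only sketch of that idea (an illustrative approximation, not the actual `_find_binning_thresholds` implementation, which also subsamples large columns, special-cases columns with few distinct values, and ignores missing values) reproduces the regular-data assertions:

import numpy as np

def approx_binning_thresholds(col, max_bins):
    # max_bins - 1 evenly spaced quantiles of the column serve as cut points;
    # plain linear interpolation is exact for the regular data used here.
    quantiles = np.linspace(0, 100, num=max_bins + 1)[1:-1]
    return np.percentile(col, quantiles)

data = np.linspace(0, 10, 1001)
print(approx_binning_thresholds(data, max_bins=10))  # [1. 2. 3. 4. 5. 6. 7. 8. 9.]
print(approx_binning_thresholds(data, max_bins=5))   # [2. 4. 6. 8.]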
+@pytest.mark.parametrize( + "max_bins, scale, offset", + [ + (3, 2, -1), + (42, 1, 0), + (255, 0.3, 42), + ], +) +def test_bin_mapper_identity_small(max_bins, scale, offset): + data = np.arange(max_bins).reshape(-1, 1) * scale + offset + # max_bins is the number of bins for non-missing values + n_bins = max_bins + 1 + binned = _BinMapper(n_bins=n_bins).fit_transform(data) + assert_array_equal(binned, np.arange(max_bins).reshape(-1, 1)) + + +@pytest.mark.parametrize( + "max_bins_small, max_bins_large", + [ + (2, 2), + (3, 3), + (4, 4), + (42, 42), + (255, 255), + (5, 17), + (42, 255), + ], +) +def test_bin_mapper_idempotence(max_bins_small, max_bins_large): + assert max_bins_large >= max_bins_small + data = np.random.RandomState(42).normal(size=30000).reshape(-1, 1) + mapper_small = _BinMapper(n_bins=max_bins_small + 1) + mapper_large = _BinMapper(n_bins=max_bins_small + 1) + binned_small = mapper_small.fit_transform(data) + binned_large = mapper_large.fit_transform(binned_small) + assert_array_equal(binned_small, binned_large) + + +@pytest.mark.parametrize("n_bins", [10, 100, 256]) +@pytest.mark.parametrize("diff", [-5, 0, 5]) +def test_n_bins_non_missing(n_bins, diff): + # Check that n_bins_non_missing is n_unique_values when + # there are not a lot of unique values, else n_bins - 1. + + n_unique_values = n_bins + diff + X = list(range(n_unique_values)) * 2 + X = np.array(X).reshape(-1, 1) + mapper = _BinMapper(n_bins=n_bins).fit(X) + assert np.all(mapper.n_bins_non_missing_ == min(n_bins - 1, n_unique_values)) + + +def test_subsample(): + # Make sure bin thresholds are different when applying subsampling + mapper_no_subsample = _BinMapper(subsample=None, random_state=0).fit(DATA) + mapper_subsample = _BinMapper(subsample=256, random_state=0).fit(DATA) + + for feature in range(DATA.shape[1]): + assert not np.allclose( + mapper_no_subsample.bin_thresholds_[feature], + mapper_subsample.bin_thresholds_[feature], + rtol=1e-4, + ) + + +@pytest.mark.parametrize( + "n_bins, n_bins_non_missing, X_trans_expected", + [ + ( + 256, + [4, 2, 2], + [ + [0, 0, 0], # 255 <=> missing value + [255, 255, 0], + [1, 0, 0], + [255, 1, 1], + [2, 1, 1], + [3, 0, 0], + ], + ), + ( + 3, + [2, 2, 2], + [ + [0, 0, 0], # 2 <=> missing value + [2, 2, 0], + [0, 0, 0], + [2, 1, 1], + [1, 1, 1], + [1, 0, 0], + ], + ), + ], +) +def test_missing_values_support(n_bins, n_bins_non_missing, X_trans_expected): + # check for missing values: make sure nans are mapped to the last bin + # and that the _BinMapper attributes are correct + + X = [ + [1, 1, 0], + [np.nan, np.nan, 0], + [2, 1, 0], + [np.nan, 2, 1], + [3, 2, 1], + [4, 1, 0], + ] + + X = np.array(X) + + mapper = _BinMapper(n_bins=n_bins) + mapper.fit(X) + + assert_array_equal(mapper.n_bins_non_missing_, n_bins_non_missing) + + for feature_idx in range(X.shape[1]): + assert ( + len(mapper.bin_thresholds_[feature_idx]) + == n_bins_non_missing[feature_idx] - 1 + ) + + assert mapper.missing_values_bin_idx_ == n_bins - 1 + + X_trans = mapper.transform(X) + assert_array_equal(X_trans, X_trans_expected) + + +def test_infinite_values(): + # Make sure infinite values are properly handled. 
+ bin_mapper = _BinMapper() + + X = np.array([-np.inf, 0, 1, np.inf]).reshape(-1, 1) + + bin_mapper.fit(X) + assert_allclose(bin_mapper.bin_thresholds_[0], [-np.inf, 0.5, ALMOST_INF]) + assert bin_mapper.n_bins_non_missing_ == [4] + + expected_binned_X = np.array([0, 1, 2, 3]).reshape(-1, 1) + assert_array_equal(bin_mapper.transform(X), expected_binned_X) + + +@pytest.mark.parametrize("n_bins", [15, 256]) +def test_categorical_feature(n_bins): + # Basic test for categorical features + # we make sure that categories are mapped into [0, n_categories - 1] and + # that nans are mapped to the last bin + X = np.array( + [[4] * 500 + [1] * 3 + [10] * 4 + [0] * 4 + [13] + [7] * 5 + [np.nan] * 2], + dtype=X_DTYPE, + ).T + known_categories = [np.unique(X[~np.isnan(X)])] + + bin_mapper = _BinMapper( + n_bins=n_bins, + is_categorical=np.array([True]), + known_categories=known_categories, + ).fit(X) + assert bin_mapper.n_bins_non_missing_ == [6] + assert_array_equal(bin_mapper.bin_thresholds_[0], [0, 1, 4, 7, 10, 13]) + + X = np.array([[0, 1, 4, np.nan, 7, 10, 13]], dtype=X_DTYPE).T + expected_trans = np.array([[0, 1, 2, n_bins - 1, 3, 4, 5]]).T + assert_array_equal(bin_mapper.transform(X), expected_trans) + + # Negative categories are mapped to the missing values' bin + # (i.e. the bin of index `missing_values_bin_idx_ == n_bins - 1`). + # Unknown positive categories do not happen in practice and are tested + # here only for illustration purposes. + X = np.array([[-4, -1, 100]], dtype=X_DTYPE).T + expected_trans = np.array([[n_bins - 1, n_bins - 1, 6]]).T + assert_array_equal(bin_mapper.transform(X), expected_trans) + + +def test_categorical_feature_negative_missing(): + """Make sure bin mapper treats negative categories as missing values.""" + X = np.array( + [[4] * 500 + [1] * 3 + [5] * 10 + [-1] * 3 + [np.nan] * 4], dtype=X_DTYPE + ).T + bin_mapper = _BinMapper( + n_bins=4, + is_categorical=np.array([True]), + known_categories=[np.array([1, 4, 5], dtype=X_DTYPE)], + ).fit(X) + + assert bin_mapper.n_bins_non_missing_ == [3] + + X = np.array([[-1, 1, 3, 5, np.nan]], dtype=X_DTYPE).T + + # Negative values for categorical features are considered as missing values. + # They are mapped to the bin of index `bin_mapper.missing_values_bin_idx_`, + # which is 3 here. 
+ assert bin_mapper.missing_values_bin_idx_ == 3 + expected_trans = np.array([[3, 0, 1, 2, 3]]).T + assert_array_equal(bin_mapper.transform(X), expected_trans) + + +@pytest.mark.parametrize("n_bins", (128, 256)) +def test_categorical_with_numerical_features(n_bins): + # basic check for binmapper with mixed data + X1 = np.arange(10, 20).reshape(-1, 1) # numerical + X2 = np.arange(10, 15).reshape(-1, 1) # categorical + X2 = np.r_[X2, X2] + X = np.c_[X1, X2] + known_categories = [None, np.unique(X2).astype(X_DTYPE)] + + bin_mapper = _BinMapper( + n_bins=n_bins, + is_categorical=np.array([False, True]), + known_categories=known_categories, + ).fit(X) + + assert_array_equal(bin_mapper.n_bins_non_missing_, [10, 5]) + + bin_thresholds = bin_mapper.bin_thresholds_ + assert len(bin_thresholds) == 2 + assert_array_equal(bin_thresholds[1], np.arange(10, 15)) + + expected_X_trans = [ + [0, 0], + [1, 1], + [2, 2], + [3, 3], + [4, 4], + [5, 0], + [6, 1], + [7, 2], + [8, 3], + [9, 4], + ] + assert_array_equal(bin_mapper.transform(X), expected_X_trans) + + +def test_make_known_categories_bitsets(): + # Check the output of make_known_categories_bitsets + X = np.array( + [[14, 2, 30], [30, 4, 70], [40, 10, 180], [40, 240, 180]], dtype=X_DTYPE + ) + + bin_mapper = _BinMapper( + n_bins=256, + is_categorical=np.array([False, True, True]), + known_categories=[None, X[:, 1], X[:, 2]], + ) + bin_mapper.fit(X) + + known_cat_bitsets, f_idx_map = bin_mapper.make_known_categories_bitsets() + + # Note that for non-categorical features, values are left at 0 + expected_f_idx_map = np.array([0, 0, 1], dtype=np.uint8) + assert_allclose(expected_f_idx_map, f_idx_map) + + expected_cat_bitset = np.zeros((2, 8), dtype=np.uint32) + + # first categorical feature: [2, 4, 10, 240] + f_idx = 1 + mapped_f_idx = f_idx_map[f_idx] + expected_cat_bitset[mapped_f_idx, 0] = 2**2 + 2**4 + 2**10 + # 240 = 7 * 32 + 16, therefore bit 16 of the 32-bit word at index 7 is set. 
+ expected_cat_bitset[mapped_f_idx, 7] = 2**16 + + # second categorical feature [30, 70, 180] + f_idx = 2 + mapped_f_idx = f_idx_map[f_idx] + expected_cat_bitset[mapped_f_idx, 0] = 2**30 + expected_cat_bitset[mapped_f_idx, 2] = 2**6 + expected_cat_bitset[mapped_f_idx, 5] = 2**20 + + assert_allclose(expected_cat_bitset, known_cat_bitsets) + + +@pytest.mark.parametrize( + "is_categorical, known_categories, match", + [ + (np.array([True]), [None], "Known categories for feature 0 must be provided"), + ( + np.array([False]), + np.array([1, 2, 3]), + "isn't marked as a categorical feature, but categories were passed", + ), + ], +) +def test_categorical_parameters(is_categorical, known_categories, match): + # test the validation of the is_categorical and known_categories parameters + + X = np.array([[1, 2, 3]], dtype=X_DTYPE) + + bin_mapper = _BinMapper( + is_categorical=is_categorical, known_categories=known_categories + ) + with pytest.raises(ValueError, match=match): + bin_mapper.fit(X) diff --git a/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/test_bitset.py b/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/test_bitset.py new file mode 100644 index 0000000000000000000000000000000000000000..c02d66b666f80216088c691db39a55c055aa8d83 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/test_bitset.py @@ -0,0 +1,64 @@ +import numpy as np +import pytest +from numpy.testing import assert_allclose + +from sklearn.ensemble._hist_gradient_boosting._bitset import ( + in_bitset_memoryview, + set_bitset_memoryview, + set_raw_bitset_from_binned_bitset, +) +from sklearn.ensemble._hist_gradient_boosting.common import X_DTYPE + + +@pytest.mark.parametrize( + "values_to_insert, expected_bitset", + [ + ([0, 4, 33], np.array([2**0 + 2**4, 2**1, 0], dtype=np.uint32)), + ( + [31, 32, 33, 79], + np.array([2**31, 2**0 + 2**1, 2**15], dtype=np.uint32), + ), + ], +) +def test_set_get_bitset(values_to_insert, expected_bitset): + n_32bits_ints = 3 + bitset = np.zeros(n_32bits_ints, dtype=np.uint32) + for value in values_to_insert: + set_bitset_memoryview(bitset, value) + assert_allclose(expected_bitset, bitset) + for value in range(32 * n_32bits_ints): + if value in values_to_insert: + assert in_bitset_memoryview(bitset, value) + else: + assert not in_bitset_memoryview(bitset, value) + + +@pytest.mark.parametrize( + "raw_categories, binned_cat_to_insert, expected_raw_bitset", + [ + ( + [3, 4, 5, 10, 31, 32, 43], + [0, 2, 4, 5, 6], + [2**3 + 2**5 + 2**31, 2**0 + 2**11], + ), + ([3, 33, 50, 52], [1, 3], [0, 2**1 + 2**20]), + ], +) +def test_raw_bitset_from_binned_bitset( + raw_categories, binned_cat_to_insert, expected_raw_bitset +): + binned_bitset = np.zeros(2, dtype=np.uint32) + raw_bitset = np.zeros(2, dtype=np.uint32) + raw_categories = np.asarray(raw_categories, dtype=X_DTYPE) + + for val in binned_cat_to_insert: + set_bitset_memoryview(binned_bitset, val) + + set_raw_bitset_from_binned_bitset(raw_bitset, binned_bitset, raw_categories) + + assert_allclose(expected_raw_bitset, raw_bitset) + for binned_cat_val, raw_cat_val in enumerate(raw_categories): + if binned_cat_val in binned_cat_to_insert: + assert in_bitset_memoryview(raw_bitset, raw_cat_val) + else: + assert not in_bitset_memoryview(raw_bitset, raw_cat_val) diff --git a/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/test_compare_lightgbm.py 
b/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/test_compare_lightgbm.py new file mode 100644 index 0000000000000000000000000000000000000000..bbdcb38ef013ad2c8d1fe970908d0af3c2ac00db --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/test_compare_lightgbm.py @@ -0,0 +1,279 @@ +import numpy as np +import pytest + +from sklearn.datasets import make_classification, make_regression +from sklearn.ensemble import ( + HistGradientBoostingClassifier, + HistGradientBoostingRegressor, +) +from sklearn.ensemble._hist_gradient_boosting.binning import _BinMapper +from sklearn.ensemble._hist_gradient_boosting.utils import get_equivalent_estimator +from sklearn.metrics import accuracy_score +from sklearn.model_selection import train_test_split + + +@pytest.mark.parametrize("seed", range(5)) +@pytest.mark.parametrize( + "loss", + [ + "squared_error", + "poisson", + pytest.param( + "gamma", + marks=pytest.mark.skip("LightGBM with gamma loss has larger deviation."), + ), + ], +) +@pytest.mark.parametrize("min_samples_leaf", (1, 20)) +@pytest.mark.parametrize( + "n_samples, max_leaf_nodes", + [ + (255, 4096), + (1000, 8), + ], +) +def test_same_predictions_regression( + seed, loss, min_samples_leaf, n_samples, max_leaf_nodes +): + # Make sure sklearn has the same predictions as lightgbm for easy targets. + # + # In particular when the size of the trees are bound and the number of + # samples is large enough, the structure of the prediction trees found by + # LightGBM and sklearn should be exactly identical. + # + # Notes: + # - Several candidate splits may have equal gains when the number of + # samples in a node is low (and because of float errors). Therefore the + # predictions on the test set might differ if the structure of the tree + # is not exactly the same. To avoid this issue we only compare the + # predictions on the test set when the number of samples is large enough + # and max_leaf_nodes is low enough. + # - To ignore discrepancies caused by small differences in the binning + # strategy, data is pre-binned if n_samples > 255. + # - We don't check the absolute_error loss here. This is because + # LightGBM's computation of the median (used for the initial value of + # raw_prediction) is a bit off (they'll e.g. return midpoints when there + # is no need to.). Since these tests only run 1 iteration, the + # discrepancy between the initial values leads to biggish differences in + # the predictions. These differences are much smaller with more + # iterations. 
+ pytest.importorskip("lightgbm") + + rng = np.random.RandomState(seed=seed) + max_iter = 1 + max_bins = 255 + + X, y = make_regression( + n_samples=n_samples, n_features=5, n_informative=5, random_state=0 + ) + + if loss in ("gamma", "poisson"): + # make the target positive + y = np.abs(y) + np.mean(np.abs(y)) + + if n_samples > 255: + # bin data and convert it to float32 so that the estimator doesn't + # treat it as pre-binned + X = _BinMapper(n_bins=max_bins + 1).fit_transform(X).astype(np.float32) + + X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=rng) + + est_sklearn = HistGradientBoostingRegressor( + loss=loss, + max_iter=max_iter, + max_bins=max_bins, + learning_rate=1, + early_stopping=False, + min_samples_leaf=min_samples_leaf, + max_leaf_nodes=max_leaf_nodes, + ) + est_lightgbm = get_equivalent_estimator(est_sklearn, lib="lightgbm") + est_lightgbm.set_params(min_sum_hessian_in_leaf=0) + + est_lightgbm.fit(X_train, y_train) + est_sklearn.fit(X_train, y_train) + + # We need X to be treated an numerical data, not pre-binned data. + X_train, X_test = X_train.astype(np.float32), X_test.astype(np.float32) + + pred_lightgbm = est_lightgbm.predict(X_train) + pred_sklearn = est_sklearn.predict(X_train) + if loss in ("gamma", "poisson"): + # More than 65% of the predictions must be close up to the 2nd decimal. + # TODO: We are not entirely satisfied with this lax comparison, but the root + # cause is not clear, maybe algorithmic differences. One such example is the + # poisson_max_delta_step parameter of LightGBM which does not exist in HGBT. + assert ( + np.mean(np.isclose(pred_lightgbm, pred_sklearn, rtol=1e-2, atol=1e-2)) + > 0.65 + ) + else: + # Less than 1% of the predictions may deviate more than 1e-3 in relative terms. + assert np.mean(np.isclose(pred_lightgbm, pred_sklearn, rtol=1e-3)) > 1 - 0.01 + + if max_leaf_nodes < 10 and n_samples >= 1000 and loss in ("squared_error",): + pred_lightgbm = est_lightgbm.predict(X_test) + pred_sklearn = est_sklearn.predict(X_test) + # Less than 1% of the predictions may deviate more than 1e-4 in relative terms. 
+ assert np.mean(np.isclose(pred_lightgbm, pred_sklearn, rtol=1e-4)) > 1 - 0.01 + + +@pytest.mark.parametrize("seed", range(5)) +@pytest.mark.parametrize("min_samples_leaf", (1, 20)) +@pytest.mark.parametrize( + "n_samples, max_leaf_nodes", + [ + (255, 4096), + (1000, 8), + ], +) +def test_same_predictions_classification( + seed, min_samples_leaf, n_samples, max_leaf_nodes +): + # Same as test_same_predictions_regression but for classification + pytest.importorskip("lightgbm") + + rng = np.random.RandomState(seed=seed) + max_iter = 1 + n_classes = 2 + max_bins = 255 + + X, y = make_classification( + n_samples=n_samples, + n_classes=n_classes, + n_features=5, + n_informative=5, + n_redundant=0, + random_state=0, + ) + + if n_samples > 255: + # bin data and convert it to float32 so that the estimator doesn't + # treat it as pre-binned + X = _BinMapper(n_bins=max_bins + 1).fit_transform(X).astype(np.float32) + + X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=rng) + + est_sklearn = HistGradientBoostingClassifier( + loss="log_loss", + max_iter=max_iter, + max_bins=max_bins, + learning_rate=1, + early_stopping=False, + min_samples_leaf=min_samples_leaf, + max_leaf_nodes=max_leaf_nodes, + ) + est_lightgbm = get_equivalent_estimator( + est_sklearn, lib="lightgbm", n_classes=n_classes + ) + + est_lightgbm.fit(X_train, y_train) + est_sklearn.fit(X_train, y_train) + + # We need X to be treated an numerical data, not pre-binned data. + X_train, X_test = X_train.astype(np.float32), X_test.astype(np.float32) + + pred_lightgbm = est_lightgbm.predict(X_train) + pred_sklearn = est_sklearn.predict(X_train) + assert np.mean(pred_sklearn == pred_lightgbm) > 0.89 + + acc_lightgbm = accuracy_score(y_train, pred_lightgbm) + acc_sklearn = accuracy_score(y_train, pred_sklearn) + np.testing.assert_almost_equal(acc_lightgbm, acc_sklearn) + + if max_leaf_nodes < 10 and n_samples >= 1000: + pred_lightgbm = est_lightgbm.predict(X_test) + pred_sklearn = est_sklearn.predict(X_test) + assert np.mean(pred_sklearn == pred_lightgbm) > 0.89 + + acc_lightgbm = accuracy_score(y_test, pred_lightgbm) + acc_sklearn = accuracy_score(y_test, pred_sklearn) + np.testing.assert_almost_equal(acc_lightgbm, acc_sklearn, decimal=2) + + +@pytest.mark.parametrize("seed", range(5)) +@pytest.mark.parametrize("min_samples_leaf", (1, 20)) +@pytest.mark.parametrize( + "n_samples, max_leaf_nodes", + [ + (255, 4096), + (10000, 8), + ], +) +def test_same_predictions_multiclass_classification( + seed, min_samples_leaf, n_samples, max_leaf_nodes +): + # Same as test_same_predictions_regression but for classification + pytest.importorskip("lightgbm") + + rng = np.random.RandomState(seed=seed) + n_classes = 3 + max_iter = 1 + max_bins = 255 + lr = 1 + + X, y = make_classification( + n_samples=n_samples, + n_classes=n_classes, + n_features=5, + n_informative=5, + n_redundant=0, + n_clusters_per_class=1, + random_state=0, + ) + + if n_samples > 255: + # bin data and convert it to float32 so that the estimator doesn't + # treat it as pre-binned + X = _BinMapper(n_bins=max_bins + 1).fit_transform(X).astype(np.float32) + + X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=rng) + + est_sklearn = HistGradientBoostingClassifier( + loss="log_loss", + max_iter=max_iter, + max_bins=max_bins, + learning_rate=lr, + early_stopping=False, + min_samples_leaf=min_samples_leaf, + max_leaf_nodes=max_leaf_nodes, + ) + est_lightgbm = get_equivalent_estimator( + est_sklearn, lib="lightgbm", n_classes=n_classes + ) + + 
est_lightgbm.fit(X_train, y_train) + est_sklearn.fit(X_train, y_train) + + # We need X to be treated an numerical data, not pre-binned data. + X_train, X_test = X_train.astype(np.float32), X_test.astype(np.float32) + + pred_lightgbm = est_lightgbm.predict(X_train) + pred_sklearn = est_sklearn.predict(X_train) + assert np.mean(pred_sklearn == pred_lightgbm) > 0.89 + + proba_lightgbm = est_lightgbm.predict_proba(X_train) + proba_sklearn = est_sklearn.predict_proba(X_train) + # assert more than 75% of the predicted probabilities are the same up to + # the second decimal + assert np.mean(np.abs(proba_lightgbm - proba_sklearn) < 1e-2) > 0.75 + + acc_lightgbm = accuracy_score(y_train, pred_lightgbm) + acc_sklearn = accuracy_score(y_train, pred_sklearn) + + np.testing.assert_allclose(acc_lightgbm, acc_sklearn, rtol=0, atol=5e-2) + + if max_leaf_nodes < 10 and n_samples >= 1000: + pred_lightgbm = est_lightgbm.predict(X_test) + pred_sklearn = est_sklearn.predict(X_test) + assert np.mean(pred_sklearn == pred_lightgbm) > 0.89 + + proba_lightgbm = est_lightgbm.predict_proba(X_train) + proba_sklearn = est_sklearn.predict_proba(X_train) + # assert more than 75% of the predicted probabilities are the same up + # to the second decimal + assert np.mean(np.abs(proba_lightgbm - proba_sklearn) < 1e-2) > 0.75 + + acc_lightgbm = accuracy_score(y_test, pred_lightgbm) + acc_sklearn = accuracy_score(y_test, pred_sklearn) + np.testing.assert_almost_equal(acc_lightgbm, acc_sklearn, decimal=2) diff --git a/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/test_gradient_boosting.py b/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/test_gradient_boosting.py new file mode 100644 index 0000000000000000000000000000000000000000..e14d786b5bc747577a54054e21beab798cc320f5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/test_gradient_boosting.py @@ -0,0 +1,1683 @@ +import copyreg +import io +import pickle +import re +import warnings +from unittest.mock import Mock + +import joblib +import numpy as np +import pytest +from joblib.numpy_pickle import NumpyPickler +from numpy.testing import assert_allclose, assert_array_equal + +import sklearn +from sklearn._loss.loss import ( + AbsoluteError, + HalfBinomialLoss, + HalfSquaredError, + PinballLoss, +) +from sklearn.base import BaseEstimator, TransformerMixin, clone, is_regressor +from sklearn.compose import make_column_transformer +from sklearn.datasets import make_classification, make_low_rank_matrix, make_regression +from sklearn.dummy import DummyRegressor +from sklearn.ensemble import ( + HistGradientBoostingClassifier, + HistGradientBoostingRegressor, +) +from sklearn.ensemble._hist_gradient_boosting.binning import _BinMapper +from sklearn.ensemble._hist_gradient_boosting.common import G_H_DTYPE +from sklearn.ensemble._hist_gradient_boosting.grower import TreeGrower +from sklearn.ensemble._hist_gradient_boosting.predictor import TreePredictor +from sklearn.exceptions import NotFittedError +from sklearn.metrics import get_scorer, mean_gamma_deviance, mean_poisson_deviance +from sklearn.model_selection import cross_val_score, train_test_split +from sklearn.pipeline import make_pipeline +from sklearn.preprocessing import KBinsDiscretizer, MinMaxScaler, OneHotEncoder +from sklearn.utils import _IS_32BIT, shuffle +from sklearn.utils._openmp_helpers import _openmp_effective_n_threads +from sklearn.utils._testing import _convert_container + +n_threads = 
_openmp_effective_n_threads() + +X_classification, y_classification = make_classification(random_state=0) +X_regression, y_regression = make_regression(random_state=0) +X_multi_classification, y_multi_classification = make_classification( + n_classes=3, n_informative=3, random_state=0 +) + + +def _make_dumb_dataset(n_samples): + """Make a dumb dataset to test early stopping.""" + rng = np.random.RandomState(42) + X_dumb = rng.randn(n_samples, 1) + y_dumb = (X_dumb[:, 0] > 0).astype("int64") + return X_dumb, y_dumb + + +@pytest.mark.parametrize( + "GradientBoosting, X, y", + [ + (HistGradientBoostingClassifier, X_classification, y_classification), + (HistGradientBoostingRegressor, X_regression, y_regression), + ], +) +@pytest.mark.parametrize( + "params, err_msg", + [ + ( + {"interaction_cst": [0, 1]}, + "Interaction constraints must be a sequence of tuples or lists", + ), + ( + {"interaction_cst": [{0, 9999}]}, + r"Interaction constraints must consist of integer indices in \[0," + r" n_features - 1\] = \[.*\], specifying the position of features,", + ), + ( + {"interaction_cst": [{-1, 0}]}, + r"Interaction constraints must consist of integer indices in \[0," + r" n_features - 1\] = \[.*\], specifying the position of features,", + ), + ( + {"interaction_cst": [{0.5}]}, + r"Interaction constraints must consist of integer indices in \[0," + r" n_features - 1\] = \[.*\], specifying the position of features,", + ), + ], +) +def test_init_parameters_validation(GradientBoosting, X, y, params, err_msg): + with pytest.raises(ValueError, match=err_msg): + GradientBoosting(**params).fit(X, y) + + +@pytest.mark.parametrize( + "scoring, validation_fraction, early_stopping, n_iter_no_change, tol", + [ + ("neg_mean_squared_error", 0.1, True, 5, 1e-7), # use scorer + ("neg_mean_squared_error", None, True, 5, 1e-1), # use scorer on train + (None, 0.1, True, 5, 1e-7), # same with default scorer + (None, None, True, 5, 1e-1), + ("loss", 0.1, True, 5, 1e-7), # use loss + ("loss", None, True, 5, 1e-1), # use loss on training data + (None, None, False, 5, 0.0), # no early stopping + ], +) +def test_early_stopping_regression( + scoring, validation_fraction, early_stopping, n_iter_no_change, tol +): + max_iter = 200 + + X, y = make_regression(n_samples=50, random_state=0) + + gb = HistGradientBoostingRegressor( + verbose=1, # just for coverage + min_samples_leaf=5, # easier to overfit fast + scoring=scoring, + tol=tol, + early_stopping=early_stopping, + validation_fraction=validation_fraction, + max_iter=max_iter, + n_iter_no_change=n_iter_no_change, + random_state=0, + ) + gb.fit(X, y) + + if early_stopping: + assert n_iter_no_change <= gb.n_iter_ < max_iter + else: + assert gb.n_iter_ == max_iter + + +@pytest.mark.parametrize( + "data", + ( + make_classification(n_samples=30, random_state=0), + make_classification( + n_samples=30, n_classes=3, n_clusters_per_class=1, random_state=0 + ), + ), +) +@pytest.mark.parametrize( + "scoring, validation_fraction, early_stopping, n_iter_no_change, tol", + [ + ("accuracy", 0.1, True, 5, 1e-7), # use scorer + ("accuracy", None, True, 5, 1e-1), # use scorer on training data + (None, 0.1, True, 5, 1e-7), # same with default scorer + (None, None, True, 5, 1e-1), + ("loss", 0.1, True, 5, 1e-7), # use loss + ("loss", None, True, 5, 1e-1), # use loss on training data + (None, None, False, 5, 0.0), # no early stopping + ], +) +def test_early_stopping_classification( + data, scoring, validation_fraction, early_stopping, n_iter_no_change, tol +): + max_iter = 50 + + X, y = data + + 
gb = HistGradientBoostingClassifier( + verbose=1, # just for coverage + min_samples_leaf=5, # easier to overfit fast + scoring=scoring, + tol=tol, + early_stopping=early_stopping, + validation_fraction=validation_fraction, + max_iter=max_iter, + n_iter_no_change=n_iter_no_change, + random_state=0, + ) + gb.fit(X, y) + + if early_stopping is True: + assert n_iter_no_change <= gb.n_iter_ < max_iter + else: + assert gb.n_iter_ == max_iter + + +@pytest.mark.parametrize( + "GradientBoosting, X, y", + [ + (HistGradientBoostingClassifier, *_make_dumb_dataset(10000)), + (HistGradientBoostingClassifier, *_make_dumb_dataset(10001)), + (HistGradientBoostingRegressor, *_make_dumb_dataset(10000)), + (HistGradientBoostingRegressor, *_make_dumb_dataset(10001)), + ], +) +def test_early_stopping_default(GradientBoosting, X, y): + # Test that early stopping is enabled by default if and only if there + # are more than 10000 samples + gb = GradientBoosting(max_iter=10, n_iter_no_change=2, tol=1e-1) + gb.fit(X, y) + if X.shape[0] > 10000: + assert gb.n_iter_ < gb.max_iter + else: + assert gb.n_iter_ == gb.max_iter + + +@pytest.mark.parametrize( + "scores, n_iter_no_change, tol, stopping", + [ + ([], 1, 0.001, False), # not enough iterations + ([1, 1, 1], 5, 0.001, False), # not enough iterations + ([1, 1, 1, 1, 1], 5, 0.001, False), # not enough iterations + ([1, 2, 3, 4, 5, 6], 5, 0.001, False), # significant improvement + ([1, 2, 3, 4, 5, 6], 5, 0.0, False), # significant improvement + ([1, 2, 3, 4, 5, 6], 5, 0.999, False), # significant improvement + ([1, 2, 3, 4, 5, 6], 5, 5 - 1e-5, False), # significant improvement + ([1] * 6, 5, 0.0, True), # no significant improvement + ([1] * 6, 5, 0.001, True), # no significant improvement + ([1] * 6, 5, 5, True), # no significant improvement + ], +) +def test_should_stop(scores, n_iter_no_change, tol, stopping): + gbdt = HistGradientBoostingClassifier(n_iter_no_change=n_iter_no_change, tol=tol) + assert gbdt._should_stop(scores) == stopping + + +def test_absolute_error(): + # For coverage only. + X, y = make_regression(n_samples=500, random_state=0) + gbdt = HistGradientBoostingRegressor(loss="absolute_error", random_state=0) + gbdt.fit(X, y) + assert gbdt.score(X, y) > 0.9 + + +def test_absolute_error_sample_weight(): + # non regression test for issue #19400 + # make sure no error is thrown during fit of + # HistGradientBoostingRegressor with absolute_error loss function + # and passing sample_weight + rng = np.random.RandomState(0) + n_samples = 100 + X = rng.uniform(-1, 1, size=(n_samples, 2)) + y = rng.uniform(-1, 1, size=n_samples) + sample_weight = rng.uniform(0, 1, size=n_samples) + gbdt = HistGradientBoostingRegressor(loss="absolute_error") + gbdt.fit(X, y, sample_weight=sample_weight) + + +@pytest.mark.parametrize("y", [([1.0, -2.0, 0.0]), ([0.0, 1.0, 2.0])]) +def test_gamma_y_positive(y): + # Test that ValueError is raised if any y_i <= 0. + err_msg = r"loss='gamma' requires strictly positive y." + gbdt = HistGradientBoostingRegressor(loss="gamma", random_state=0) + with pytest.raises(ValueError, match=err_msg): + gbdt.fit(np.zeros(shape=(len(y), 1)), y) + + +def test_gamma(): + # For a Gamma distributed target, we expect an HGBT trained with the Gamma deviance + # (loss) to give better results than an HGBT with any other loss function, measured + # in out-of-sample Gamma deviance as metric/score. + # Note that squared error could potentially predict negative values which is + # invalid (np.inf) for the Gamma deviance. 
A Poisson HGBT (having a log link) + # does not have that defect. + # Important note: It seems that a Poisson HGBT almost always has better + # out-of-sample performance than the Gamma HGBT, measured in Gamma deviance. + # LightGBM shows the same behaviour. Hence, we only compare to a squared error + # HGBT, but not to a Poisson deviance HGBT. + rng = np.random.RandomState(42) + n_train, n_test, n_features = 500, 100, 20 + X = make_low_rank_matrix( + n_samples=n_train + n_test, + n_features=n_features, + random_state=rng, + ) + # We create a log-linear Gamma model. This gives y.min ~ 1e-2, y.max ~ 1e2 + coef = rng.uniform(low=-10, high=20, size=n_features) + # Numpy parametrizes gamma(shape=k, scale=theta) with mean = k * theta and + # variance = k * theta^2. We parametrize it instead with mean = exp(X @ coef) + # and variance = dispersion * mean^2 by setting k = 1 / dispersion, + # theta = dispersion * mean. + dispersion = 0.5 + y = rng.gamma(shape=1 / dispersion, scale=dispersion * np.exp(X @ coef)) + X_train, X_test, y_train, y_test = train_test_split( + X, y, test_size=n_test, random_state=rng + ) + gbdt_gamma = HistGradientBoostingRegressor(loss="gamma", random_state=123) + gbdt_mse = HistGradientBoostingRegressor(loss="squared_error", random_state=123) + dummy = DummyRegressor(strategy="mean") + for model in (gbdt_gamma, gbdt_mse, dummy): + model.fit(X_train, y_train) + + for X, y in [(X_train, y_train), (X_test, y_test)]: + loss_gbdt_gamma = mean_gamma_deviance(y, gbdt_gamma.predict(X)) + # We restrict the squared error HGBT to predict at least the minimum seen y at + # train time to make it strictly positive. + loss_gbdt_mse = mean_gamma_deviance( + y, np.maximum(np.min(y_train), gbdt_mse.predict(X)) + ) + loss_dummy = mean_gamma_deviance(y, dummy.predict(X)) + assert loss_gbdt_gamma < loss_dummy + assert loss_gbdt_gamma < loss_gbdt_mse + + +@pytest.mark.parametrize("quantile", [0.2, 0.5, 0.8]) +def test_quantile_asymmetric_error(quantile): + """Test quantile regression for asymmetric distributed targets.""" + n_samples = 10_000 + rng = np.random.RandomState(42) + # take care that X @ coef + intercept > 0 + X = np.concatenate( + ( + np.abs(rng.randn(n_samples)[:, None]), + -rng.randint(2, size=(n_samples, 1)), + ), + axis=1, + ) + intercept = 1.23 + coef = np.array([0.5, -2]) + # For an exponential distribution with rate lambda, e.g. exp(-lambda * x), + # the quantile at level q is: + # quantile(q) = - log(1 - q) / lambda + # scale = 1/lambda = -quantile(q) / log(1-q) + y = rng.exponential( + scale=-(X @ coef + intercept) / np.log(1 - quantile), size=n_samples + ) + model = HistGradientBoostingRegressor( + loss="quantile", + quantile=quantile, + max_iter=25, + random_state=0, + max_leaf_nodes=10, + ).fit(X, y) + assert_allclose(np.mean(model.predict(X) > y), quantile, rtol=1e-2) + + pinball_loss = PinballLoss(quantile=quantile) + loss_true_quantile = pinball_loss(y, X @ coef + intercept) + loss_pred_quantile = pinball_loss(y, model.predict(X)) + # we are overfitting + assert loss_pred_quantile <= loss_true_quantile + + +@pytest.mark.parametrize("y", [([1.0, -2.0, 0.0]), ([0.0, 0.0, 0.0])]) +def test_poisson_y_positive(y): + # Test that ValueError is raised if either one y_i < 0 or sum(y_i) <= 0. + err_msg = r"loss='poisson' requires non-negative y and sum\(y\) > 0." 
+ gbdt = HistGradientBoostingRegressor(loss="poisson", random_state=0) + with pytest.raises(ValueError, match=err_msg): + gbdt.fit(np.zeros(shape=(len(y), 1)), y) + + +def test_poisson(): + # For Poisson distributed target, Poisson loss should give better results + # than least squares measured in Poisson deviance as metric. + rng = np.random.RandomState(42) + n_train, n_test, n_features = 500, 100, 100 + X = make_low_rank_matrix( + n_samples=n_train + n_test, n_features=n_features, random_state=rng + ) + # We create a log-linear Poisson model and downscale coef as it will get + # exponentiated. + coef = rng.uniform(low=-2, high=2, size=n_features) / np.max(X, axis=0) + y = rng.poisson(lam=np.exp(X @ coef)) + X_train, X_test, y_train, y_test = train_test_split( + X, y, test_size=n_test, random_state=rng + ) + gbdt_pois = HistGradientBoostingRegressor(loss="poisson", random_state=rng) + gbdt_ls = HistGradientBoostingRegressor(loss="squared_error", random_state=rng) + gbdt_pois.fit(X_train, y_train) + gbdt_ls.fit(X_train, y_train) + dummy = DummyRegressor(strategy="mean").fit(X_train, y_train) + + for X, y in [(X_train, y_train), (X_test, y_test)]: + metric_pois = mean_poisson_deviance(y, gbdt_pois.predict(X)) + # squared_error might produce non-positive predictions => clip + metric_ls = mean_poisson_deviance(y, np.clip(gbdt_ls.predict(X), 1e-15, None)) + metric_dummy = mean_poisson_deviance(y, dummy.predict(X)) + assert metric_pois < metric_ls + assert metric_pois < metric_dummy + + +def test_binning_train_validation_are_separated(): + # Make sure training and validation data are binned separately. + # See issue 13926 + + rng = np.random.RandomState(0) + validation_fraction = 0.2 + gb = HistGradientBoostingClassifier( + early_stopping=True, validation_fraction=validation_fraction, random_state=rng + ) + gb.fit(X_classification, y_classification) + mapper_training_data = gb._bin_mapper + + # Note that since the data is small there is no subsampling and the + # random_state doesn't matter + mapper_whole_data = _BinMapper(random_state=0) + mapper_whole_data.fit(X_classification) + + n_samples = X_classification.shape[0] + assert np.all( + mapper_training_data.n_bins_non_missing_ + == int((1 - validation_fraction) * n_samples) + ) + assert np.all( + mapper_training_data.n_bins_non_missing_ + != mapper_whole_data.n_bins_non_missing_ + ) + + +def test_missing_values_trivial(): + # sanity check for missing values support. With only one feature and + # y == isnan(X), the gbdt is supposed to reach perfect accuracy on the + # training set. 
+ + n_samples = 100 + n_features = 1 + rng = np.random.RandomState(0) + + X = rng.normal(size=(n_samples, n_features)) + mask = rng.binomial(1, 0.5, size=X.shape).astype(bool) + X[mask] = np.nan + y = mask.ravel() + gb = HistGradientBoostingClassifier() + gb.fit(X, y) + + assert gb.score(X, y) == pytest.approx(1) + + +@pytest.mark.parametrize("problem", ("classification", "regression")) +@pytest.mark.parametrize( + ( + "missing_proportion, expected_min_score_classification, " + "expected_min_score_regression" + ), + [(0.1, 0.97, 0.89), (0.2, 0.93, 0.81), (0.5, 0.79, 0.52)], +) +def test_missing_values_resilience( + problem, + missing_proportion, + expected_min_score_classification, + expected_min_score_regression, +): + # Make sure the estimators can deal with missing values and still yield + # decent predictions + + rng = np.random.RandomState(0) + n_samples = 1000 + n_features = 2 + if problem == "regression": + X, y = make_regression( + n_samples=n_samples, + n_features=n_features, + n_informative=n_features, + random_state=rng, + ) + gb = HistGradientBoostingRegressor() + expected_min_score = expected_min_score_regression + else: + X, y = make_classification( + n_samples=n_samples, + n_features=n_features, + n_informative=n_features, + n_redundant=0, + n_repeated=0, + random_state=rng, + ) + gb = HistGradientBoostingClassifier() + expected_min_score = expected_min_score_classification + + mask = rng.binomial(1, missing_proportion, size=X.shape).astype(bool) + X[mask] = np.nan + + gb.fit(X, y) + + assert gb.score(X, y) > expected_min_score + + +@pytest.mark.parametrize( + "data", + [ + make_classification(random_state=0, n_classes=2), + make_classification(random_state=0, n_classes=3, n_informative=3), + ], + ids=["binary_log_loss", "multiclass_log_loss"], +) +def test_zero_division_hessians(data): + # non regression test for issue #14018 + # make sure we avoid zero division errors when computing the leaves values. + + # If the learning rate is too high, the raw predictions are bad and will + # saturate the softmax (or sigmoid in binary classif). This leads to + # probabilities being exactly 0 or 1, gradients being constant, and + # hessians being zero. 
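A quick numeric check of the saturation described above (a standalone NumPy sketch of the failure mode): with float64, a raw prediction of around +/-40 already rounds the sigmoid to exactly 1.0 or 0.0, at which point the Bernoulli hessian p * (1 - p) is exactly zero and a naive leaf-value computation (sum of gradients divided by sum of hessians) would divide by zero.

import numpy as np

raw_prediction = 40.0                      # an overly confident raw score
p = 1.0 / (1.0 + np.exp(-raw_prediction))  # sigmoid saturates to exactly 1.0
hessian = p * (1.0 - p)                    # exactly 0.0 -> potential zero division
assert p == 1.0 and hessian == 0.0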
+ X, y = data + gb = HistGradientBoostingClassifier(learning_rate=100, max_iter=10) + gb.fit(X, y) + + +def test_small_trainset(): + # Make sure that the small trainset is stratified and has the expected + # length (10k samples) + n_samples = 20000 + original_distrib = {0: 0.1, 1: 0.2, 2: 0.3, 3: 0.4} + rng = np.random.RandomState(42) + X = rng.randn(n_samples).reshape(n_samples, 1) + y = [ + [class_] * int(prop * n_samples) for (class_, prop) in original_distrib.items() + ] + y = shuffle(np.concatenate(y)) + gb = HistGradientBoostingClassifier() + + # Compute the small training set + X_small, y_small, *_ = gb._get_small_trainset( + X, y, seed=42, sample_weight_train=None + ) + + # Compute the class distribution in the small training set + unique, counts = np.unique(y_small, return_counts=True) + small_distrib = {class_: count / 10000 for (class_, count) in zip(unique, counts)} + + # Test that the small training set has the expected length + assert X_small.shape[0] == 10000 + assert y_small.shape[0] == 10000 + + # Test that the class distributions in the whole dataset and in the small + # training set are identical + assert small_distrib == pytest.approx(original_distrib) + + +def test_missing_values_minmax_imputation(): + # Compare the buit-in missing value handling of Histogram GBC with an + # a-priori missing value imputation strategy that should yield the same + # results in terms of decision function. + # + # Each feature (containing NaNs) is replaced by 2 features: + # - one where the nans are replaced by min(feature) - 1 + # - one where the nans are replaced by max(feature) + 1 + # A split where nans go to the left has an equivalent split in the + # first (min) feature, and a split where nans go to the right has an + # equivalent split in the second (max) feature. + # + # Assuming the data is such that there is never a tie to select the best + # feature to split on during training, the learned decision trees should be + # strictly equivalent (learn a sequence of splits that encode the same + # decision function). 
+ # + # The MinMaxImputer transformer is meant to be a toy implementation of the + # "Missing In Attributes" (MIA) missing value handling for decision trees + # https://www.sciencedirect.com/science/article/abs/pii/S0167865508000305 + # The implementation of MIA as an imputation transformer was suggested by + # "Remark 3" in :arxiv:`1902.06931` + + class MinMaxImputer(TransformerMixin, BaseEstimator): + def fit(self, X, y=None): + mm = MinMaxScaler().fit(X) + self.data_min_ = mm.data_min_ + self.data_max_ = mm.data_max_ + return self + + def transform(self, X): + X_min, X_max = X.copy(), X.copy() + + for feature_idx in range(X.shape[1]): + nan_mask = np.isnan(X[:, feature_idx]) + X_min[nan_mask, feature_idx] = self.data_min_[feature_idx] - 1 + X_max[nan_mask, feature_idx] = self.data_max_[feature_idx] + 1 + + return np.concatenate([X_min, X_max], axis=1) + + def make_missing_value_data(n_samples=int(1e4), seed=0): + rng = np.random.RandomState(seed) + X, y = make_regression(n_samples=n_samples, n_features=4, random_state=rng) + + # Pre-bin the data to ensure a deterministic handling by the 2 + # strategies and also make it easier to insert np.nan in a structured + # way: + X = KBinsDiscretizer(n_bins=42, encode="ordinal").fit_transform(X) + + # First feature has missing values completely at random: + rnd_mask = rng.rand(X.shape[0]) > 0.9 + X[rnd_mask, 0] = np.nan + + # Second and third features have missing values for extreme values + # (censoring missingness): + low_mask = X[:, 1] == 0 + X[low_mask, 1] = np.nan + + high_mask = X[:, 2] == X[:, 2].max() + X[high_mask, 2] = np.nan + + # Make the last feature's nan pattern very informative: + y_max = np.percentile(y, 70) + y_max_mask = y >= y_max + y[y_max_mask] = y_max + X[y_max_mask, 3] = np.nan + + # Check that there is at least one missing value in each feature: + for feature_idx in range(X.shape[1]): + assert any(np.isnan(X[:, feature_idx])) + + # Let's use a test set to check that the learned decision function is + # the same as evaluated on unseen data. Otherwise it could just be the + # case that we find two independent ways to overfit the training set. + return train_test_split(X, y, random_state=rng) + + # n_samples needs to be large enough to minimize the likelihood of having + # several candidate splits with the same gain value in a given tree. + X_train, X_test, y_train, y_test = make_missing_value_data( + n_samples=int(1e4), seed=0 + ) + + # Use a small number of leaf nodes and iterations so as to keep + # under-fitting models to minimize the likelihood of ties when training the + # model. + gbm1 = HistGradientBoostingRegressor(max_iter=100, max_leaf_nodes=5, random_state=0) + gbm1.fit(X_train, y_train) + + gbm2 = make_pipeline(MinMaxImputer(), clone(gbm1)) + gbm2.fit(X_train, y_train) + + # Check that the models reach the same score: + assert gbm1.score(X_train, y_train) == pytest.approx(gbm2.score(X_train, y_train)) + + assert gbm1.score(X_test, y_test) == pytest.approx(gbm2.score(X_test, y_test)) + + # Check that the individual predictions match, as a finer grained + # decision function check. 
+ assert_allclose(gbm1.predict(X_train), gbm2.predict(X_train)) + assert_allclose(gbm1.predict(X_test), gbm2.predict(X_test)) + + +def test_infinite_values(): + # Basic test for infinite values + + X = np.array([-np.inf, 0, 1, np.inf]).reshape(-1, 1) + y = np.array([0, 0, 1, 1]) + + gbdt = HistGradientBoostingRegressor(min_samples_leaf=1) + gbdt.fit(X, y) + np.testing.assert_allclose(gbdt.predict(X), y, atol=1e-4) + + +def test_consistent_lengths(): + X = np.array([-np.inf, 0, 1, np.inf]).reshape(-1, 1) + y = np.array([0, 0, 1, 1]) + sample_weight = np.array([0.1, 0.3, 0.1]) + gbdt = HistGradientBoostingRegressor() + with pytest.raises(ValueError, match=r"sample_weight.shape == \(3,\), expected"): + gbdt.fit(X, y, sample_weight) + + with pytest.raises( + ValueError, match="Found input variables with inconsistent number" + ): + gbdt.fit(X, y[1:]) + + +def test_infinite_values_missing_values(): + # High level test making sure that inf and nan values are properly handled + # when both are present. This is similar to + # test_split_on_nan_with_infinite_values() in test_grower.py, though we + # cannot check the predictions for binned values here. + + X = np.asarray([-np.inf, 0, 1, np.inf, np.nan]).reshape(-1, 1) + y_isnan = np.isnan(X.ravel()) + y_isinf = X.ravel() == np.inf + + stump_clf = HistGradientBoostingClassifier( + min_samples_leaf=1, max_iter=1, learning_rate=1, max_depth=2 + ) + + assert stump_clf.fit(X, y_isinf).score(X, y_isinf) == 1 + assert stump_clf.fit(X, y_isnan).score(X, y_isnan) == 1 + + +@pytest.mark.parametrize("scoring", [None, "loss"]) +def test_string_target_early_stopping(scoring): + # Regression tests for #14709 where the targets need to be encoded before + # to compute the score + rng = np.random.RandomState(42) + X = rng.randn(100, 10) + y = np.array(["x"] * 50 + ["y"] * 50, dtype=object) + gbrt = HistGradientBoostingClassifier(n_iter_no_change=10, scoring=scoring) + gbrt.fit(X, y) + + +def test_zero_sample_weights_regression(): + # Make sure setting a SW to zero amounts to ignoring the corresponding + # sample + + X = [[1, 0], [1, 0], [1, 0], [0, 1]] + y = [0, 0, 1, 0] + # ignore the first 2 training samples by setting their weight to 0 + sample_weight = [0, 0, 1, 1] + gb = HistGradientBoostingRegressor(min_samples_leaf=1) + gb.fit(X, y, sample_weight=sample_weight) + assert gb.predict([[1, 0]])[0] > 0.5 + + +def test_zero_sample_weights_classification(): + # Make sure setting a SW to zero amounts to ignoring the corresponding + # sample + + X = [[1, 0], [1, 0], [1, 0], [0, 1]] + y = [0, 0, 1, 0] + # ignore the first 2 training samples by setting their weight to 0 + sample_weight = [0, 0, 1, 1] + gb = HistGradientBoostingClassifier(loss="log_loss", min_samples_leaf=1) + gb.fit(X, y, sample_weight=sample_weight) + assert_array_equal(gb.predict([[1, 0]]), [1]) + + X = [[1, 0], [1, 0], [1, 0], [0, 1], [1, 1]] + y = [0, 0, 1, 0, 2] + # ignore the first 2 training samples by setting their weight to 0 + sample_weight = [0, 0, 1, 1, 1] + gb = HistGradientBoostingClassifier(loss="log_loss", min_samples_leaf=1) + gb.fit(X, y, sample_weight=sample_weight) + assert_array_equal(gb.predict([[1, 0]]), [1]) + + +@pytest.mark.parametrize( + "problem", ("regression", "binary_classification", "multiclass_classification") +) +@pytest.mark.parametrize("duplication", ("half", "all")) +def test_sample_weight_effect(problem, duplication): + # High level test to make sure that duplicating a sample is equivalent to + # giving it weight of 2. 
+ + # fails for n_samples > 255 because binning does not take sample weights + # into account. Keeping n_samples <= 255 makes + # sure only unique values are used so SW have no effect on binning. + n_samples = 255 + n_features = 2 + if problem == "regression": + X, y = make_regression( + n_samples=n_samples, + n_features=n_features, + n_informative=n_features, + random_state=0, + ) + Klass = HistGradientBoostingRegressor + else: + n_classes = 2 if problem == "binary_classification" else 3 + X, y = make_classification( + n_samples=n_samples, + n_features=n_features, + n_informative=n_features, + n_redundant=0, + n_clusters_per_class=1, + n_classes=n_classes, + random_state=0, + ) + Klass = HistGradientBoostingClassifier + + # This test can't pass if min_samples_leaf > 1 because that would force 2 + # samples to be in the same node in est_sw, while these samples would be + # free to be separate in est_dup: est_dup would just group together the + # duplicated samples. + est = Klass(min_samples_leaf=1) + + # Create dataset with duplicate and corresponding sample weights + if duplication == "half": + lim = n_samples // 2 + else: + lim = n_samples + X_dup = np.r_[X, X[:lim]] + y_dup = np.r_[y, y[:lim]] + sample_weight = np.ones(shape=(n_samples)) + sample_weight[:lim] = 2 + + est_sw = clone(est).fit(X, y, sample_weight=sample_weight) + est_dup = clone(est).fit(X_dup, y_dup) + + # checking raw_predict is stricter than just predict for classification + assert np.allclose(est_sw._raw_predict(X_dup), est_dup._raw_predict(X_dup)) + + +@pytest.mark.parametrize("Loss", (HalfSquaredError, AbsoluteError)) +def test_sum_hessians_are_sample_weight(Loss): + # For losses with constant hessians, the sum_hessians field of the + # histograms must be equal to the sum of the sample weight of samples at + # the corresponding bin. + + rng = np.random.RandomState(0) + n_samples = 1000 + n_features = 2 + X, y = make_regression(n_samples=n_samples, n_features=n_features, random_state=rng) + bin_mapper = _BinMapper() + X_binned = bin_mapper.fit_transform(X) + + # While sample weights are supposed to be positive, this still works. + sample_weight = rng.normal(size=n_samples) + + loss = Loss(sample_weight=sample_weight) + gradients, hessians = loss.init_gradient_and_hessian( + n_samples=n_samples, dtype=G_H_DTYPE + ) + gradients, hessians = gradients.reshape((-1, 1)), hessians.reshape((-1, 1)) + raw_predictions = rng.normal(size=(n_samples, 1)) + loss.gradient_hessian( + y_true=y, + raw_prediction=raw_predictions, + sample_weight=sample_weight, + gradient_out=gradients, + hessian_out=hessians, + n_threads=n_threads, + ) + + # build sum_sample_weight which contains the sum of the sample weights at + # each bin (for each feature). 
This must be equal to the sum_hessians + # field of the corresponding histogram + sum_sw = np.zeros(shape=(n_features, bin_mapper.n_bins)) + for feature_idx in range(n_features): + for sample_idx in range(n_samples): + sum_sw[feature_idx, X_binned[sample_idx, feature_idx]] += sample_weight[ + sample_idx + ] + + # Build histogram + grower = TreeGrower( + X_binned, gradients[:, 0], hessians[:, 0], n_bins=bin_mapper.n_bins + ) + histograms = grower.histogram_builder.compute_histograms_brute( + grower.root.sample_indices + ) + + for feature_idx in range(n_features): + for bin_idx in range(bin_mapper.n_bins): + assert histograms[feature_idx, bin_idx]["sum_hessians"] == ( + pytest.approx(sum_sw[feature_idx, bin_idx], rel=1e-5) + ) + + +def test_max_depth_max_leaf_nodes(): + # Non regression test for + # https://github.com/scikit-learn/scikit-learn/issues/16179 + # there was a bug when the max_depth and the max_leaf_nodes criteria were + # met at the same time, which would lead to max_leaf_nodes not being + # respected. + X, y = make_classification(random_state=0) + est = HistGradientBoostingClassifier(max_depth=2, max_leaf_nodes=3, max_iter=1).fit( + X, y + ) + tree = est._predictors[0][0] + assert tree.get_max_depth() == 2 + assert tree.get_n_leaf_nodes() == 3 # would be 4 prior to bug fix + + +def test_early_stopping_on_test_set_with_warm_start(): + # Non regression test for #16661 where second fit fails with + # warm_start=True, early_stopping is on, and no validation set + X, y = make_classification(random_state=0) + gb = HistGradientBoostingClassifier( + max_iter=1, + scoring="loss", + warm_start=True, + early_stopping=True, + n_iter_no_change=1, + validation_fraction=None, + ) + + gb.fit(X, y) + # does not raise on second call + gb.set_params(max_iter=2) + gb.fit(X, y) + + +def test_early_stopping_with_sample_weights(monkeypatch): + """Check that sample weights is passed in to the scorer and _raw_predict is not + called.""" + + mock_scorer = Mock(side_effect=get_scorer("neg_median_absolute_error")) + + def mock_check_scoring(estimator, scoring): + assert scoring == "neg_median_absolute_error" + return mock_scorer + + monkeypatch.setattr( + sklearn.ensemble._hist_gradient_boosting.gradient_boosting, + "check_scoring", + mock_check_scoring, + ) + + X, y = make_regression(random_state=0) + sample_weight = np.ones_like(y) + hist = HistGradientBoostingRegressor( + max_iter=2, + early_stopping=True, + random_state=0, + scoring="neg_median_absolute_error", + ) + mock_raw_predict = Mock(side_effect=hist._raw_predict) + hist._raw_predict = mock_raw_predict + hist.fit(X, y, sample_weight=sample_weight) + + # _raw_predict should never be called with scoring as a string + assert mock_raw_predict.call_count == 0 + + # For scorer is called twice (train and val) for the baseline score, and twice + # per iteration (train and val) after that. So 6 times in total for `max_iter=2`. 
+ assert mock_scorer.call_count == 6 + for arg_list in mock_scorer.call_args_list: + assert "sample_weight" in arg_list[1] + + +def test_raw_predict_is_called_with_custom_scorer(): + """Custom scorer will still call _raw_predict.""" + + mock_scorer = Mock(side_effect=get_scorer("neg_median_absolute_error")) + + X, y = make_regression(random_state=0) + hist = HistGradientBoostingRegressor( + max_iter=2, + early_stopping=True, + random_state=0, + scoring=mock_scorer, + ) + mock_raw_predict = Mock(side_effect=hist._raw_predict) + hist._raw_predict = mock_raw_predict + hist.fit(X, y) + + # `_raw_predict` and scorer is called twice (train and val) for the baseline score, + # and twice per iteration (train and val) after that. So 6 times in total for + # `max_iter=2`. + assert mock_raw_predict.call_count == 6 + assert mock_scorer.call_count == 6 + + +@pytest.mark.parametrize( + "Est", (HistGradientBoostingClassifier, HistGradientBoostingRegressor) +) +def test_single_node_trees(Est): + # Make sure it's still possible to build single-node trees. In that case + # the value of the root is set to 0. That's a correct value: if the tree is + # single-node that's because min_gain_to_split is not respected right from + # the root, so we don't want the tree to have any impact on the + # predictions. + + X, y = make_classification(random_state=0) + y[:] = 1 # constant target will lead to a single root node + + est = Est(max_iter=20) + est.fit(X, y) + + assert all(len(predictor[0].nodes) == 1 for predictor in est._predictors) + assert all(predictor[0].nodes[0]["value"] == 0 for predictor in est._predictors) + # Still gives correct predictions thanks to the baseline prediction + assert_allclose(est.predict(X), y) + + +@pytest.mark.parametrize( + "Est, loss, X, y", + [ + ( + HistGradientBoostingClassifier, + HalfBinomialLoss(sample_weight=None), + X_classification, + y_classification, + ), + ( + HistGradientBoostingRegressor, + HalfSquaredError(sample_weight=None), + X_regression, + y_regression, + ), + ], +) +def test_custom_loss(Est, loss, X, y): + est = Est(loss=loss, max_iter=20) + est.fit(X, y) + + +@pytest.mark.parametrize( + "HistGradientBoosting, X, y", + [ + (HistGradientBoostingClassifier, X_classification, y_classification), + (HistGradientBoostingRegressor, X_regression, y_regression), + ( + HistGradientBoostingClassifier, + X_multi_classification, + y_multi_classification, + ), + ], +) +def test_staged_predict(HistGradientBoosting, X, y): + # Test whether staged predictor eventually gives + # the same prediction. + X_train, X_test, y_train, y_test = train_test_split( + X, y, test_size=0.5, random_state=0 + ) + gb = HistGradientBoosting(max_iter=10) + + # test raise NotFittedError if not fitted + with pytest.raises(NotFittedError): + next(gb.staged_predict(X_test)) + + gb.fit(X_train, y_train) + + # test if the staged predictions of each iteration + # are equal to the corresponding predictions of the same estimator + # trained from scratch. 
+ # this also tests the limit case where max_iter = 1 + method_names = ( + ["predict"] + if is_regressor(gb) + else ["predict", "predict_proba", "decision_function"] + ) + for method_name in method_names: + staged_method = getattr(gb, "staged_" + method_name) + staged_predictions = list(staged_method(X_test)) + assert len(staged_predictions) == gb.n_iter_ + for n_iter, staged_predictions in enumerate(staged_method(X_test), 1): + aux = HistGradientBoosting(max_iter=n_iter) + aux.fit(X_train, y_train) + pred_aux = getattr(aux, method_name)(X_test) + + assert_allclose(staged_predictions, pred_aux) + assert staged_predictions.shape == pred_aux.shape + + +@pytest.mark.parametrize("insert_missing", [False, True]) +@pytest.mark.parametrize( + "Est", (HistGradientBoostingRegressor, HistGradientBoostingClassifier) +) +@pytest.mark.parametrize("bool_categorical_parameter", [True, False]) +@pytest.mark.parametrize("missing_value", [np.nan, -1]) +def test_unknown_categories_nan( + insert_missing, Est, bool_categorical_parameter, missing_value +): + # Make sure no error is raised at predict if a category wasn't seen during + # fit. We also make sure they're treated as nans. + + rng = np.random.RandomState(0) + n_samples = 1000 + f1 = rng.rand(n_samples) + f2 = rng.randint(4, size=n_samples) + X = np.c_[f1, f2] + y = np.zeros(shape=n_samples) + y[X[:, 1] % 2 == 0] = 1 + + if bool_categorical_parameter: + categorical_features = [False, True] + else: + categorical_features = [1] + + if insert_missing: + mask = rng.binomial(1, 0.01, size=X.shape).astype(bool) + assert mask.sum() > 0 + X[mask] = missing_value + + est = Est(max_iter=20, categorical_features=categorical_features).fit(X, y) + assert_array_equal(est.is_categorical_, [False, True]) + + # Make sure no error is raised on unknown categories and nans + # unknown categories will be treated as nans + X_test = np.zeros((10, X.shape[1]), dtype=float) + X_test[:5, 1] = 30 + X_test[5:, 1] = missing_value + assert len(np.unique(est.predict(X_test))) == 1 + + +def test_categorical_encoding_strategies(): + # Check native categorical handling vs different encoding strategies. We + # make sure that native encoding needs only 1 split to achieve a perfect + # prediction on a simple dataset. In contrast, OneHotEncoded data needs + # more depth / splits, and treating categories as ordered (just using + # OrdinalEncoder) requires even more depth. + + # dataset with one random continuous feature, and one categorical feature + # with values in [0, 5], e.g. from an OrdinalEncoder.
+ # class == 1 iff categorical value in {0, 2, 4} + rng = np.random.RandomState(0) + n_samples = 10_000 + f1 = rng.rand(n_samples) + f2 = rng.randint(6, size=n_samples) + X = np.c_[f1, f2] + y = np.zeros(shape=n_samples) + y[X[:, 1] % 2 == 0] = 1 + + # make sure dataset is balanced so that the baseline_prediction doesn't + # influence predictions too much with max_iter = 1 + assert 0.49 < y.mean() < 0.51 + + native_cat_specs = [ + [False, True], + [1], + ] + try: + import pandas as pd + + X = pd.DataFrame(X, columns=["f_0", "f_1"]) + native_cat_specs.append(["f_1"]) + except ImportError: + pass + + for native_cat_spec in native_cat_specs: + clf_cat = HistGradientBoostingClassifier( + max_iter=1, max_depth=1, categorical_features=native_cat_spec + ) + clf_cat.fit(X, y) + + # Using native categorical encoding, we get perfect predictions with just + # one split + assert cross_val_score(clf_cat, X, y).mean() == 1 + + # quick sanity check for the bitset: 0, 2, 4 = 2**0 + 2**2 + 2**4 = 21 + expected_left_bitset = [21, 0, 0, 0, 0, 0, 0, 0] + left_bitset = clf_cat.fit(X, y)._predictors[0][0].raw_left_cat_bitsets[0] + assert_array_equal(left_bitset, expected_left_bitset) + + # Treating categories as ordered, we need more depth / more splits to get + # the same predictions + clf_no_cat = HistGradientBoostingClassifier( + max_iter=1, max_depth=4, categorical_features=None + ) + assert cross_val_score(clf_no_cat, X, y).mean() < 0.9 + + clf_no_cat.set_params(max_depth=5) + assert cross_val_score(clf_no_cat, X, y).mean() == 1 + + # Using OHEd data, we need less splits than with pure OEd data, but we + # still need more splits than with the native categorical splits + ct = make_column_transformer( + (OneHotEncoder(sparse_output=False), [1]), remainder="passthrough" + ) + X_ohe = ct.fit_transform(X) + clf_no_cat.set_params(max_depth=2) + assert cross_val_score(clf_no_cat, X_ohe, y).mean() < 0.9 + + clf_no_cat.set_params(max_depth=3) + assert cross_val_score(clf_no_cat, X_ohe, y).mean() == 1 + + +@pytest.mark.parametrize( + "Est", (HistGradientBoostingClassifier, HistGradientBoostingRegressor) +) +@pytest.mark.parametrize( + "categorical_features, monotonic_cst, expected_msg", + [ + ( + [b"hello", b"world"], + None, + re.escape( + "categorical_features must be an array-like of bool, int or str, " + "got: bytes40." + ), + ), + ( + np.array([b"hello", 1.3], dtype=object), + None, + re.escape( + "categorical_features must be an array-like of bool, int or str, " + "got: bytes, float." 
+ ), + ), + ( + [0, -1], + None, + re.escape( + "categorical_features set as integer indices must be in " + "[0, n_features - 1]" + ), + ), + ( + [True, True, False, False, True], + None, + re.escape( + "categorical_features set as a boolean mask must have shape " + "(n_features,)" + ), + ), + ( + [True, True, False, False], + [0, -1, 0, 1], + "Categorical features cannot have monotonic constraints", + ), + ], +) +def test_categorical_spec_errors( + Est, categorical_features, monotonic_cst, expected_msg +): + # Test errors when categories are specified incorrectly + n_samples = 100 + X, y = make_classification(random_state=0, n_features=4, n_samples=n_samples) + rng = np.random.RandomState(0) + X[:, 0] = rng.randint(0, 10, size=n_samples) + X[:, 1] = rng.randint(0, 10, size=n_samples) + est = Est(categorical_features=categorical_features, monotonic_cst=monotonic_cst) + + with pytest.raises(ValueError, match=expected_msg): + est.fit(X, y) + + +@pytest.mark.parametrize( + "Est", (HistGradientBoostingClassifier, HistGradientBoostingRegressor) +) +def test_categorical_spec_errors_with_feature_names(Est): + pd = pytest.importorskip("pandas") + n_samples = 10 + X = pd.DataFrame( + { + "f0": range(n_samples), + "f1": range(n_samples), + "f2": [1.0] * n_samples, + } + ) + y = [0, 1] * (n_samples // 2) + + est = Est(categorical_features=["f0", "f1", "f3"]) + expected_msg = re.escape( + "categorical_features has a item value 'f3' which is not a valid " + "feature name of the training data." + ) + with pytest.raises(ValueError, match=expected_msg): + est.fit(X, y) + + est = Est(categorical_features=["f0", "f1"]) + expected_msg = re.escape( + "categorical_features should be passed as an array of integers or " + "as a boolean mask when the model is fitted on data without feature " + "names." + ) + with pytest.raises(ValueError, match=expected_msg): + est.fit(X.to_numpy(), y) + + +@pytest.mark.parametrize( + "Est", (HistGradientBoostingClassifier, HistGradientBoostingRegressor) +) +@pytest.mark.parametrize("categorical_features", ([False, False], [])) +@pytest.mark.parametrize("as_array", (True, False)) +def test_categorical_spec_no_categories(Est, categorical_features, as_array): + # Make sure we can properly detect that no categorical features are present + # even if the categorical_features parameter is not None + X = np.arange(10).reshape(5, 2) + y = np.arange(5) + if as_array: + categorical_features = np.asarray(categorical_features) + est = Est(categorical_features=categorical_features).fit(X, y) + assert est.is_categorical_ is None + + +@pytest.mark.parametrize( + "Est", (HistGradientBoostingClassifier, HistGradientBoostingRegressor) +) +@pytest.mark.parametrize( + "use_pandas, feature_name", [(False, "at index 0"), (True, "'f0'")] +) +def test_categorical_bad_encoding_errors(Est, use_pandas, feature_name): + # Test errors when categories are encoded incorrectly + + gb = Est(categorical_features=[True], max_bins=2) + + if use_pandas: + pd = pytest.importorskip("pandas") + X = pd.DataFrame({"f0": [0, 1, 2]}) + else: + X = np.array([[0, 1, 2]]).T + y = np.arange(3) + msg = ( + f"Categorical feature {feature_name} is expected to have a " + "cardinality <= 2 but actually has a cardinality of 3." 
+ ) + with pytest.raises(ValueError, match=msg): + gb.fit(X, y) + + # nans are ignored in the counts + X = np.array([[0, 1, np.nan]]).T + y = np.arange(3) + gb.fit(X, y) + + +@pytest.mark.parametrize( + "Est", (HistGradientBoostingClassifier, HistGradientBoostingRegressor) +) +def test_uint8_predict(Est): + # Non regression test for + # https://github.com/scikit-learn/scikit-learn/issues/18408 + # Make sure X can be of dtype uint8 (i.e. X_BINNED_DTYPE) in predict. It + # will be converted to X_DTYPE. + + rng = np.random.RandomState(0) + + X = rng.randint(0, 100, size=(10, 2)).astype(np.uint8) + y = rng.randint(0, 2, size=10).astype(np.uint8) + est = Est() + est.fit(X, y) + est.predict(X) + + +@pytest.mark.parametrize( + "interaction_cst, n_features, result", + [ + (None, 931, None), + ([{0, 1}], 2, [{0, 1}]), + ("pairwise", 2, [{0, 1}]), + ("pairwise", 4, [{0, 1}, {0, 2}, {0, 3}, {1, 2}, {1, 3}, {2, 3}]), + ("no_interactions", 2, [{0}, {1}]), + ("no_interactions", 4, [{0}, {1}, {2}, {3}]), + ([(1, 0), [5, 1]], 6, [{0, 1}, {1, 5}, {2, 3, 4}]), + ], +) +def test_check_interaction_cst(interaction_cst, n_features, result): + """Check that _check_interaction_cst returns the expected list of sets""" + est = HistGradientBoostingRegressor() + est.set_params(interaction_cst=interaction_cst) + assert est._check_interaction_cst(n_features) == result + + +def test_interaction_cst_numerically(): + """Check that interaction constraints have no forbidden interactions.""" + rng = np.random.RandomState(42) + n_samples = 1000 + X = rng.uniform(size=(n_samples, 2)) + # Construct y with a strong interaction term + # y = x0 + x1 + 5 * x0 * x1 + y = np.hstack((X, 5 * X[:, [0]] * X[:, [1]])).sum(axis=1) + + est = HistGradientBoostingRegressor(random_state=42) + est.fit(X, y) + est_no_interactions = HistGradientBoostingRegressor( + interaction_cst=[{0}, {1}], random_state=42 + ) + est_no_interactions.fit(X, y) + + delta = 0.25 + # Make sure we do not extrapolate out of the training set as tree-based estimators + # are very bad in doing so. + X_test = X[(X[:, 0] < 1 - delta) & (X[:, 1] < 1 - delta)] + X_delta_d_0 = X_test + [delta, 0] + X_delta_0_d = X_test + [0, delta] + X_delta_d_d = X_test + [delta, delta] + + # Note: For the y from above as a function of x0 and x1, we have + # y(x0+d, x1+d) = y(x0, x1) + 5 * d * (2/5 + x0 + x1) + 5 * d**2 + # y(x0+d, x1) = y(x0, x1) + 5 * d * (1/5 + x1) + # y(x0, x1+d) = y(x0, x1) + 5 * d * (1/5 + x0) + # Without interaction constraints, we would expect a result of 5 * d**2 for the + # following expression, but zero with constraints in place. + assert_allclose( + est_no_interactions.predict(X_delta_d_d) + + est_no_interactions.predict(X_test) + - est_no_interactions.predict(X_delta_d_0) + - est_no_interactions.predict(X_delta_0_d), + 0, + atol=1e-12, + ) + + # Correct result of the expressions is 5 * delta**2. But this is hard to achieve by + # a fitted tree-based model. However, with 100 iterations the expression should + # at least be positive! + assert np.all( + est.predict(X_delta_d_d) + + est.predict(X_test) + - est.predict(X_delta_d_0) + - est.predict(X_delta_0_d) + > 0.01 + ) + + +def test_no_user_warning_with_scoring(): + """Check that no UserWarning is raised when scoring is set. + + Non-regression test for #22907. 
+ """ + pd = pytest.importorskip("pandas") + X, y = make_regression(n_samples=50, random_state=0) + X_df = pd.DataFrame(X, columns=[f"col{i}" for i in range(X.shape[1])]) + + est = HistGradientBoostingRegressor( + random_state=0, scoring="neg_mean_absolute_error", early_stopping=True + ) + with warnings.catch_warnings(): + warnings.simplefilter("error", UserWarning) + est.fit(X_df, y) + + +def test_class_weights(): + """High level test to check class_weights.""" + n_samples = 255 + n_features = 2 + + X, y = make_classification( + n_samples=n_samples, + n_features=n_features, + n_informative=n_features, + n_redundant=0, + n_clusters_per_class=1, + n_classes=2, + random_state=0, + ) + y_is_1 = y == 1 + + # class_weight is the same as sample weights with the corresponding class + clf = HistGradientBoostingClassifier( + min_samples_leaf=2, random_state=0, max_depth=2 + ) + sample_weight = np.ones(shape=(n_samples)) + sample_weight[y_is_1] = 3.0 + clf.fit(X, y, sample_weight=sample_weight) + + class_weight = {0: 1.0, 1: 3.0} + clf_class_weighted = clone(clf).set_params(class_weight=class_weight) + clf_class_weighted.fit(X, y) + + assert_allclose(clf.decision_function(X), clf_class_weighted.decision_function(X)) + + # Check that sample_weight and class_weight are multiplicative + clf.fit(X, y, sample_weight=sample_weight**2) + clf_class_weighted.fit(X, y, sample_weight=sample_weight) + assert_allclose(clf.decision_function(X), clf_class_weighted.decision_function(X)) + + # Make imbalanced dataset + X_imb = np.concatenate((X[~y_is_1], X[y_is_1][:10])) + y_imb = np.concatenate((y[~y_is_1], y[y_is_1][:10])) + + # class_weight="balanced" is the same as sample_weights to be + # inversely proportional to n_samples / (n_classes * np.bincount(y)) + clf_balanced = clone(clf).set_params(class_weight="balanced") + clf_balanced.fit(X_imb, y_imb) + + class_weight = y_imb.shape[0] / (2 * np.bincount(y_imb)) + sample_weight = class_weight[y_imb] + clf_sample_weight = clone(clf).set_params(class_weight=None) + clf_sample_weight.fit(X_imb, y_imb, sample_weight=sample_weight) + + assert_allclose( + clf_balanced.decision_function(X_imb), + clf_sample_weight.decision_function(X_imb), + ) + + +def test_unknown_category_that_are_negative(): + """Check that unknown categories that are negative does not error. + + Non-regression test for #24274. 
+ """ + rng = np.random.RandomState(42) + n_samples = 1000 + X = np.c_[rng.rand(n_samples), rng.randint(4, size=n_samples)] + y = np.zeros(shape=n_samples) + y[X[:, 1] % 2 == 0] = 1 + + hist = HistGradientBoostingRegressor( + random_state=0, + categorical_features=[False, True], + max_iter=10, + ).fit(X, y) + + # Check that negative values from the second column are treated like a + # missing category + X_test_neg = np.asarray([[1, -2], [3, -4]]) + X_test_nan = np.asarray([[1, np.nan], [3, np.nan]]) + + assert_allclose(hist.predict(X_test_neg), hist.predict(X_test_nan)) + + +@pytest.mark.parametrize("dataframe_lib", ["pandas", "polars"]) +@pytest.mark.parametrize( + "HistGradientBoosting", + [HistGradientBoostingClassifier, HistGradientBoostingRegressor], +) +def test_dataframe_categorical_results_same_as_ndarray( + dataframe_lib, HistGradientBoosting +): + """Check that pandas categorical give the same results as ndarray.""" + pytest.importorskip(dataframe_lib) + + rng = np.random.RandomState(42) + n_samples = 5_000 + n_cardinality = 50 + max_bins = 100 + f_num = rng.rand(n_samples) + f_cat = rng.randint(n_cardinality, size=n_samples) + + # Make f_cat an informative feature + y = (f_cat % 3 == 0) & (f_num > 0.2) + + X = np.c_[f_num, f_cat] + f_cat = [f"cat{c:0>3}" for c in f_cat] + X_df = _convert_container( + np.asarray([f_num, f_cat]).T, + dataframe_lib, + ["f_num", "f_cat"], + categorical_feature_names=["f_cat"], + ) + + X_train, X_test, X_train_df, X_test_df, y_train, y_test = train_test_split( + X, X_df, y, random_state=0 + ) + + hist_kwargs = dict(max_iter=10, max_bins=max_bins, random_state=0) + hist_np = HistGradientBoosting(categorical_features=[False, True], **hist_kwargs) + hist_np.fit(X_train, y_train) + + hist_pd = HistGradientBoosting(categorical_features="from_dtype", **hist_kwargs) + hist_pd.fit(X_train_df, y_train) + + # Check categories are correct and sorted + categories = hist_pd._preprocessor.named_transformers_["encoder"].categories_[0] + assert_array_equal(categories, np.unique(f_cat)) + + assert len(hist_np._predictors) == len(hist_pd._predictors) + for predictor_1, predictor_2 in zip(hist_np._predictors, hist_pd._predictors): + assert len(predictor_1[0].nodes) == len(predictor_2[0].nodes) + + score_np = hist_np.score(X_test, y_test) + score_pd = hist_pd.score(X_test_df, y_test) + assert score_np == pytest.approx(score_pd) + assert_allclose(hist_np.predict(X_test), hist_pd.predict(X_test_df)) + + +@pytest.mark.parametrize("dataframe_lib", ["pandas", "polars"]) +@pytest.mark.parametrize( + "HistGradientBoosting", + [HistGradientBoostingClassifier, HistGradientBoostingRegressor], +) +def test_dataframe_categorical_errors(dataframe_lib, HistGradientBoosting): + """Check error cases for pandas categorical feature.""" + pytest.importorskip(dataframe_lib) + msg = "Categorical feature 'f_cat' is expected to have a cardinality <= 16" + hist = HistGradientBoosting(categorical_features="from_dtype", max_bins=16) + + rng = np.random.RandomState(42) + f_cat = rng.randint(0, high=100, size=100).astype(str) + X_df = _convert_container( + f_cat[:, None], dataframe_lib, ["f_cat"], categorical_feature_names=["f_cat"] + ) + y = rng.randint(0, high=2, size=100) + + with pytest.raises(ValueError, match=msg): + hist.fit(X_df, y) + + +@pytest.mark.parametrize("dataframe_lib", ["pandas", "polars"]) +def test_categorical_different_order_same_model(dataframe_lib): + """Check that the order of the categorical gives same model.""" + pytest.importorskip(dataframe_lib) + rng = 
np.random.RandomState(42) + n_samples = 1_000 + f_ints = rng.randint(low=0, high=2, size=n_samples) + + # Construct a target with some noise + y = f_ints.copy() + flipped = rng.choice([True, False], size=n_samples, p=[0.1, 0.9]) + y[flipped] = 1 - y[flipped] + + # Construct categorical where 0 -> A and 1 -> B and 1 -> A and 0 -> B + f_cat_a_b = np.asarray(["A", "B"])[f_ints] + f_cat_b_a = np.asarray(["B", "A"])[f_ints] + df_a_b = _convert_container( + f_cat_a_b[:, None], + dataframe_lib, + ["f_cat"], + categorical_feature_names=["f_cat"], + ) + df_b_a = _convert_container( + f_cat_b_a[:, None], + dataframe_lib, + ["f_cat"], + categorical_feature_names=["f_cat"], + ) + + hist_a_b = HistGradientBoostingClassifier( + categorical_features="from_dtype", random_state=0 + ) + hist_b_a = HistGradientBoostingClassifier( + categorical_features="from_dtype", random_state=0 + ) + + hist_a_b.fit(df_a_b, y) + hist_b_a.fit(df_b_a, y) + + assert len(hist_a_b._predictors) == len(hist_b_a._predictors) + for predictor_1, predictor_2 in zip(hist_a_b._predictors, hist_b_a._predictors): + assert len(predictor_1[0].nodes) == len(predictor_2[0].nodes) + + +# TODO(1.6): Remove warning and change default in 1.6 +def test_categorical_features_warn(): + """Raise warning when there are categorical features in the input DataFrame. + + This is not tested for polars because polars categories must always be + strings and strings can only be handled as categories. Therefore the + situation in which a categorical column is currently being treated as + numbers and in the future will be treated as categories cannot occur with + polars. + """ + pd = pytest.importorskip("pandas") + X = pd.DataFrame({"a": pd.Series([1, 2, 3], dtype="category"), "b": [4, 5, 6]}) + y = [0, 1, 0] + hist = HistGradientBoostingClassifier(random_state=0) + + msg = "The categorical_features parameter will change to 'from_dtype' in v1.6" + with pytest.warns(FutureWarning, match=msg): + hist.fit(X, y) + + +def get_different_bitness_node_ndarray(node_ndarray): + new_dtype_for_indexing_fields = np.int64 if _IS_32BIT else np.int32 + + # field names in Node struct with np.intp types (see + # sklearn/ensemble/_hist_gradient_boosting/common.pyx) + indexing_field_names = ["feature_idx"] + + new_dtype_dict = { + name: dtype for name, (dtype, _) in node_ndarray.dtype.fields.items() + } + for name in indexing_field_names: + new_dtype_dict[name] = new_dtype_for_indexing_fields + + new_dtype = np.dtype( + {"names": list(new_dtype_dict.keys()), "formats": list(new_dtype_dict.values())} + ) + return node_ndarray.astype(new_dtype, casting="same_kind") + + +def reduce_predictor_with_different_bitness(predictor): + cls, args, state = predictor.__reduce__() + + new_state = state.copy() + new_state["nodes"] = get_different_bitness_node_ndarray(new_state["nodes"]) + + return (cls, args, new_state) + + +def test_different_bitness_pickle(): + X, y = make_classification(random_state=0) + + clf = HistGradientBoostingClassifier(random_state=0, max_depth=3) + clf.fit(X, y) + score = clf.score(X, y) + + def pickle_dump_with_different_bitness(): + f = io.BytesIO() + p = pickle.Pickler(f) + p.dispatch_table = copyreg.dispatch_table.copy() + p.dispatch_table[TreePredictor] = reduce_predictor_with_different_bitness + + p.dump(clf) + f.seek(0) + return f + + # Simulate loading a pickle of the same model trained on a platform with different + # bitness that than the platform it will be used to make predictions on: + new_clf = pickle.load(pickle_dump_with_different_bitness()) + new_score 
= new_clf.score(X, y) + assert score == pytest.approx(new_score) + + +def test_different_bitness_joblib_pickle(): + # Make sure that a platform specific pickle generated on a 64 bit + # platform can be converted at pickle load time into an estimator + # with Cython code that works with the host's native integer precision + # to index nodes in the tree data structure when the host is a 32 bit + # platform (and vice versa). + # + # This is in particular useful to be able to train a model on a 64 bit Linux + # server and deploy the model as part of a (32 bit) WASM in-browser + # application using pyodide. + X, y = make_classification(random_state=0) + + clf = HistGradientBoostingClassifier(random_state=0, max_depth=3) + clf.fit(X, y) + score = clf.score(X, y) + + def joblib_dump_with_different_bitness(): + f = io.BytesIO() + p = NumpyPickler(f) + p.dispatch_table = copyreg.dispatch_table.copy() + p.dispatch_table[TreePredictor] = reduce_predictor_with_different_bitness + + p.dump(clf) + f.seek(0) + return f + + new_clf = joblib.load(joblib_dump_with_different_bitness()) + new_score = new_clf.score(X, y) + assert score == pytest.approx(new_score) + + +def test_pandas_nullable_dtype(): + # Non regression test for https://github.com/scikit-learn/scikit-learn/issues/28317 + pd = pytest.importorskip("pandas") + + rng = np.random.default_rng(0) + X = pd.DataFrame({"a": rng.integers(10, size=100)}).astype(pd.Int64Dtype()) + y = rng.integers(2, size=100) + + clf = HistGradientBoostingClassifier() + clf.fit(X, y) diff --git a/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/test_grower.py b/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/test_grower.py new file mode 100644 index 0000000000000000000000000000000000000000..a55cb871e3c72ea04325b0b72f7aabc419285921 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/test_grower.py @@ -0,0 +1,650 @@ +import numpy as np +import pytest +from numpy.testing import assert_allclose, assert_array_equal +from pytest import approx + +from sklearn.ensemble._hist_gradient_boosting.binning import _BinMapper +from sklearn.ensemble._hist_gradient_boosting.common import ( + G_H_DTYPE, + X_BINNED_DTYPE, + X_BITSET_INNER_DTYPE, + X_DTYPE, + Y_DTYPE, +) +from sklearn.ensemble._hist_gradient_boosting.grower import TreeGrower +from sklearn.preprocessing import OneHotEncoder +from sklearn.utils._openmp_helpers import _openmp_effective_n_threads + +n_threads = _openmp_effective_n_threads() + + +def _make_training_data(n_bins=256, constant_hessian=True): + rng = np.random.RandomState(42) + n_samples = 10000 + + # Generate some test data directly binned so as to test the grower code + # independently of the binning logic. + X_binned = rng.randint(0, n_bins - 1, size=(n_samples, 2), dtype=X_BINNED_DTYPE) + X_binned = np.asfortranarray(X_binned) + + def true_decision_function(input_features): + """Ground truth decision function + + This is a very simple yet asymmetric decision tree. Therefore the + grower code should have no trouble recovering the decision function + from 10000 training samples. 
+ """ + if input_features[0] <= n_bins // 2: + return -1 + else: + return -1 if input_features[1] <= n_bins // 3 else 1 + + target = np.array([true_decision_function(x) for x in X_binned], dtype=Y_DTYPE) + + # Assume a square loss applied to an initial model that always predicts 0 + # (hardcoded for this test): + all_gradients = target.astype(G_H_DTYPE) + shape_hessians = 1 if constant_hessian else all_gradients.shape + all_hessians = np.ones(shape=shape_hessians, dtype=G_H_DTYPE) + + return X_binned, all_gradients, all_hessians + + +def _check_children_consistency(parent, left, right): + # Make sure the samples are correctly dispatched from a parent to its + # children + assert parent.left_child is left + assert parent.right_child is right + + # each sample from the parent is propagated to one of the two children + assert len(left.sample_indices) + len(right.sample_indices) == len( + parent.sample_indices + ) + + assert set(left.sample_indices).union(set(right.sample_indices)) == set( + parent.sample_indices + ) + + # samples are sent either to the left or the right node, never to both + assert set(left.sample_indices).intersection(set(right.sample_indices)) == set() + + +@pytest.mark.parametrize( + "n_bins, constant_hessian, stopping_param, shrinkage", + [ + (11, True, "min_gain_to_split", 0.5), + (11, False, "min_gain_to_split", 1.0), + (11, True, "max_leaf_nodes", 1.0), + (11, False, "max_leaf_nodes", 0.1), + (42, True, "max_leaf_nodes", 0.01), + (42, False, "max_leaf_nodes", 1.0), + (256, True, "min_gain_to_split", 1.0), + (256, True, "max_leaf_nodes", 0.1), + ], +) +def test_grow_tree(n_bins, constant_hessian, stopping_param, shrinkage): + X_binned, all_gradients, all_hessians = _make_training_data( + n_bins=n_bins, constant_hessian=constant_hessian + ) + n_samples = X_binned.shape[0] + + if stopping_param == "max_leaf_nodes": + stopping_param = {"max_leaf_nodes": 3} + else: + stopping_param = {"min_gain_to_split": 0.01} + + grower = TreeGrower( + X_binned, + all_gradients, + all_hessians, + n_bins=n_bins, + shrinkage=shrinkage, + min_samples_leaf=1, + **stopping_param, + ) + + # The root node is not yet split, but the best possible split has + # already been evaluated: + assert grower.root.left_child is None + assert grower.root.right_child is None + + root_split = grower.root.split_info + assert root_split.feature_idx == 0 + assert root_split.bin_idx == n_bins // 2 + assert len(grower.splittable_nodes) == 1 + + # Calling split next applies the next split and computes the best split + # for each of the two newly introduced children nodes. + left_node, right_node = grower.split_next() + + # All training samples have ben split in the two nodes, approximately + # 50%/50% + _check_children_consistency(grower.root, left_node, right_node) + assert len(left_node.sample_indices) > 0.4 * n_samples + assert len(left_node.sample_indices) < 0.6 * n_samples + + if grower.min_gain_to_split > 0: + # The left node is too pure: there is no gain to split it further. + assert left_node.split_info.gain < grower.min_gain_to_split + assert left_node in grower.finalized_leaves + + # The right node can still be split further, this time on feature #1 + split_info = right_node.split_info + assert split_info.gain > 1.0 + assert split_info.feature_idx == 1 + assert split_info.bin_idx == n_bins // 3 + assert right_node.left_child is None + assert right_node.right_child is None + + # The right split has not been applied yet. 
Let's do it now: + assert len(grower.splittable_nodes) == 1 + right_left_node, right_right_node = grower.split_next() + _check_children_consistency(right_node, right_left_node, right_right_node) + assert len(right_left_node.sample_indices) > 0.1 * n_samples + assert len(right_left_node.sample_indices) < 0.2 * n_samples + + assert len(right_right_node.sample_indices) > 0.2 * n_samples + assert len(right_right_node.sample_indices) < 0.4 * n_samples + + # All the leaves are pure, it is not possible to split any further: + assert not grower.splittable_nodes + + grower._apply_shrinkage() + + # Check the values of the leaves: + assert grower.root.left_child.value == approx(shrinkage) + assert grower.root.right_child.left_child.value == approx(shrinkage) + assert grower.root.right_child.right_child.value == approx(-shrinkage, rel=1e-3) + + +def test_predictor_from_grower(): + # Build a tree on the toy 3-leaf dataset to extract the predictor. + n_bins = 256 + X_binned, all_gradients, all_hessians = _make_training_data(n_bins=n_bins) + grower = TreeGrower( + X_binned, + all_gradients, + all_hessians, + n_bins=n_bins, + shrinkage=1.0, + max_leaf_nodes=3, + min_samples_leaf=5, + ) + grower.grow() + assert grower.n_nodes == 5 # (2 decision nodes + 3 leaves) + + # Check that the node structure can be converted into a predictor + # object to perform predictions at scale + # We pass undefined binning_thresholds because we won't use predict anyway + predictor = grower.make_predictor( + binning_thresholds=np.zeros((X_binned.shape[1], n_bins)) + ) + assert predictor.nodes.shape[0] == 5 + assert predictor.nodes["is_leaf"].sum() == 3 + + # Probe some predictions for each leaf of the tree + # each group of 3 samples corresponds to a condition in _make_training_data + input_data = np.array( + [ + [0, 0], + [42, 99], + [128, 254], + [129, 0], + [129, 85], + [254, 85], + [129, 86], + [129, 254], + [242, 100], + ], + dtype=np.uint8, + ) + missing_values_bin_idx = n_bins - 1 + predictions = predictor.predict_binned( + input_data, missing_values_bin_idx, n_threads + ) + expected_targets = [1, 1, 1, 1, 1, 1, -1, -1, -1] + assert np.allclose(predictions, expected_targets) + + # Check that the training set can be recovered exactly: + predictions = predictor.predict_binned(X_binned, missing_values_bin_idx, n_threads) + assert np.allclose(predictions, -all_gradients) + + +@pytest.mark.parametrize( + "n_samples, min_samples_leaf, n_bins, constant_hessian, noise", + [ + (11, 10, 7, True, 0), + (13, 10, 42, False, 0), + (56, 10, 255, True, 0.1), + (101, 3, 7, True, 0), + (200, 42, 42, False, 0), + (300, 55, 255, True, 0.1), + (300, 301, 255, True, 0.1), + ], +) +def test_min_samples_leaf(n_samples, min_samples_leaf, n_bins, constant_hessian, noise): + rng = np.random.RandomState(seed=0) + # data = linear target, 3 features, 1 irrelevant.
+ X = rng.normal(size=(n_samples, 3)) + y = X[:, 0] - X[:, 1] + if noise: + y_scale = y.std() + y += rng.normal(scale=noise, size=n_samples) * y_scale + mapper = _BinMapper(n_bins=n_bins) + X = mapper.fit_transform(X) + + all_gradients = y.astype(G_H_DTYPE) + shape_hessian = 1 if constant_hessian else all_gradients.shape + all_hessians = np.ones(shape=shape_hessian, dtype=G_H_DTYPE) + grower = TreeGrower( + X, + all_gradients, + all_hessians, + n_bins=n_bins, + shrinkage=1.0, + min_samples_leaf=min_samples_leaf, + max_leaf_nodes=n_samples, + ) + grower.grow() + predictor = grower.make_predictor(binning_thresholds=mapper.bin_thresholds_) + + if n_samples >= min_samples_leaf: + for node in predictor.nodes: + if node["is_leaf"]: + assert node["count"] >= min_samples_leaf + else: + assert predictor.nodes.shape[0] == 1 + assert predictor.nodes[0]["is_leaf"] + assert predictor.nodes[0]["count"] == n_samples + + +@pytest.mark.parametrize("n_samples, min_samples_leaf", [(99, 50), (100, 50)]) +def test_min_samples_leaf_root(n_samples, min_samples_leaf): + # Make sure root node isn't split if n_samples is not at least twice + # min_samples_leaf + rng = np.random.RandomState(seed=0) + + n_bins = 256 + + # data = linear target, 3 features, 1 irrelevant. + X = rng.normal(size=(n_samples, 3)) + y = X[:, 0] - X[:, 1] + mapper = _BinMapper(n_bins=n_bins) + X = mapper.fit_transform(X) + + all_gradients = y.astype(G_H_DTYPE) + all_hessians = np.ones(shape=1, dtype=G_H_DTYPE) + grower = TreeGrower( + X, + all_gradients, + all_hessians, + n_bins=n_bins, + shrinkage=1.0, + min_samples_leaf=min_samples_leaf, + max_leaf_nodes=n_samples, + ) + grower.grow() + if n_samples >= min_samples_leaf * 2: + assert len(grower.finalized_leaves) >= 2 + else: + assert len(grower.finalized_leaves) == 1 + + +def assert_is_stump(grower): + # To assert that stumps are created when max_depth=1 + for leaf in (grower.root.left_child, grower.root.right_child): + assert leaf.left_child is None + assert leaf.right_child is None + + +@pytest.mark.parametrize("max_depth", [1, 2, 3]) +def test_max_depth(max_depth): + # Make sure max_depth parameter works as expected + rng = np.random.RandomState(seed=0) + + n_bins = 256 + n_samples = 1000 + + # data = linear target, 3 features, 1 irrelevant. 
+ X = rng.normal(size=(n_samples, 3)) + y = X[:, 0] - X[:, 1] + mapper = _BinMapper(n_bins=n_bins) + X = mapper.fit_transform(X) + + all_gradients = y.astype(G_H_DTYPE) + all_hessians = np.ones(shape=1, dtype=G_H_DTYPE) + grower = TreeGrower(X, all_gradients, all_hessians, max_depth=max_depth) + grower.grow() + + depth = max(leaf.depth for leaf in grower.finalized_leaves) + assert depth == max_depth + + if max_depth == 1: + assert_is_stump(grower) + + +def test_input_validation(): + X_binned, all_gradients, all_hessians = _make_training_data() + + X_binned_float = X_binned.astype(np.float32) + with pytest.raises(NotImplementedError, match="X_binned must be of type uint8"): + TreeGrower(X_binned_float, all_gradients, all_hessians) + + X_binned_C_array = np.ascontiguousarray(X_binned) + with pytest.raises( + ValueError, match="X_binned should be passed as Fortran contiguous array" + ): + TreeGrower(X_binned_C_array, all_gradients, all_hessians) + + +def test_init_parameters_validation(): + X_binned, all_gradients, all_hessians = _make_training_data() + with pytest.raises(ValueError, match="min_gain_to_split=-1 must be positive"): + TreeGrower(X_binned, all_gradients, all_hessians, min_gain_to_split=-1) + + with pytest.raises(ValueError, match="min_hessian_to_split=-1 must be positive"): + TreeGrower(X_binned, all_gradients, all_hessians, min_hessian_to_split=-1) + + +def test_missing_value_predict_only(): + # Make sure that missing values are supported at predict time even if they + # were not encountered in the training data: the missing values are + # assigned to whichever child has the most samples. + + rng = np.random.RandomState(0) + n_samples = 100 + X_binned = rng.randint(0, 256, size=(n_samples, 1), dtype=np.uint8) + X_binned = np.asfortranarray(X_binned) + + gradients = rng.normal(size=n_samples).astype(G_H_DTYPE) + hessians = np.ones(shape=1, dtype=G_H_DTYPE) + + grower = TreeGrower( + X_binned, gradients, hessians, min_samples_leaf=5, has_missing_values=False + ) + grower.grow() + + # We pass undefined binning_thresholds because we won't use predict anyway + predictor = grower.make_predictor( + binning_thresholds=np.zeros((X_binned.shape[1], X_binned.max() + 1)) + ) + + # go from root to a leaf, always following node with the most samples. + # That's the path nans are supposed to take + node = predictor.nodes[0] + while not node["is_leaf"]: + left = predictor.nodes[node["left"]] + right = predictor.nodes[node["right"]] + node = left if left["count"] > right["count"] else right + + prediction_main_path = node["value"] + + # now build X_test with only nans, and make sure all predictions are equal + # to prediction_main_path + all_nans = np.full(shape=(n_samples, 1), fill_value=np.nan) + known_cat_bitsets = np.zeros((0, 8), dtype=X_BITSET_INNER_DTYPE) + f_idx_map = np.zeros(0, dtype=np.uint32) + + y_pred = predictor.predict(all_nans, known_cat_bitsets, f_idx_map, n_threads) + assert np.all(y_pred == prediction_main_path) + + +def test_split_on_nan_with_infinite_values(): + # Make sure the split on nan situations are respected even when there are + # samples with +inf values (we set the threshold to +inf when we have a + # split on nan so this test makes sure this does not introduce edge-case + # bugs). We need to use the private API so that we can also test + # predict_binned(). 
+ + X = np.array([0, 1, np.inf, np.nan, np.nan]).reshape(-1, 1) + # the gradient values will force a split on nan situation + gradients = np.array([0, 0, 0, 100, 100], dtype=G_H_DTYPE) + hessians = np.ones(shape=1, dtype=G_H_DTYPE) + + bin_mapper = _BinMapper() + X_binned = bin_mapper.fit_transform(X) + + n_bins_non_missing = 3 + has_missing_values = True + grower = TreeGrower( + X_binned, + gradients, + hessians, + n_bins_non_missing=n_bins_non_missing, + has_missing_values=has_missing_values, + min_samples_leaf=1, + n_threads=n_threads, + ) + + grower.grow() + + predictor = grower.make_predictor(binning_thresholds=bin_mapper.bin_thresholds_) + + # sanity check: this was a split on nan + assert predictor.nodes[0]["num_threshold"] == np.inf + assert predictor.nodes[0]["bin_threshold"] == n_bins_non_missing - 1 + + known_cat_bitsets, f_idx_map = bin_mapper.make_known_categories_bitsets() + + # Make sure in particular that the +inf sample is mapped to the left child + # Note that lightgbm "fails" here and will assign the inf sample to the + # right child, even though it's a "split on nan" situation. + predictions = predictor.predict(X, known_cat_bitsets, f_idx_map, n_threads) + predictions_binned = predictor.predict_binned( + X_binned, + missing_values_bin_idx=bin_mapper.missing_values_bin_idx_, + n_threads=n_threads, + ) + np.testing.assert_allclose(predictions, -gradients) + np.testing.assert_allclose(predictions_binned, -gradients) + + +def test_grow_tree_categories(): + # Check that the grower produces the right predictor tree when a split is + # categorical + X_binned = np.array([[0, 1] * 11 + [1]], dtype=X_BINNED_DTYPE).T + X_binned = np.asfortranarray(X_binned) + + all_gradients = np.array([10, 1] * 11 + [1], dtype=G_H_DTYPE) + all_hessians = np.ones(1, dtype=G_H_DTYPE) + is_categorical = np.ones(1, dtype=np.uint8) + + grower = TreeGrower( + X_binned, + all_gradients, + all_hessians, + n_bins=4, + shrinkage=1.0, + min_samples_leaf=1, + is_categorical=is_categorical, + n_threads=n_threads, + ) + grower.grow() + assert grower.n_nodes == 3 + + categories = [np.array([4, 9], dtype=X_DTYPE)] + predictor = grower.make_predictor(binning_thresholds=categories) + root = predictor.nodes[0] + assert root["count"] == 23 + assert root["depth"] == 0 + assert root["is_categorical"] + + left, right = predictor.nodes[root["left"]], predictor.nodes[root["right"]] + + # arbitrary validation, but this means ones go to the left. + assert left["count"] >= right["count"] + + # check binned category value (1) + expected_binned_cat_bitset = [2**1] + [0] * 7 + binned_cat_bitset = predictor.binned_left_cat_bitsets + assert_array_equal(binned_cat_bitset[0], expected_binned_cat_bitset) + + # check raw category value (9) + expected_raw_cat_bitsets = [2**9] + [0] * 7 + raw_cat_bitsets = predictor.raw_left_cat_bitsets + assert_array_equal(raw_cat_bitsets[0], expected_raw_cat_bitsets) + + # Note that since there was no missing values during training, the missing + # values aren't part of the bitsets. However, we expect the missing values + # to go to the biggest child (i.e. the left one). + # The left child has a value of -1 = negative gradient. 
+ assert root["missing_go_to_left"] + + # make sure binned missing values are mapped to the left child during + # prediction + prediction_binned = predictor.predict_binned( + np.asarray([[6]]).astype(X_BINNED_DTYPE), + missing_values_bin_idx=6, + n_threads=n_threads, + ) + assert_allclose(prediction_binned, [-1]) # negative gradient + + # make sure raw missing values are mapped to the left child during + # prediction + known_cat_bitsets = np.zeros((1, 8), dtype=np.uint32) # ignored anyway + f_idx_map = np.array([0], dtype=np.uint32) + prediction = predictor.predict( + np.array([[np.nan]]), known_cat_bitsets, f_idx_map, n_threads + ) + assert_allclose(prediction, [-1]) + + +@pytest.mark.parametrize("min_samples_leaf", (1, 20)) +@pytest.mark.parametrize("n_unique_categories", (2, 10, 100)) +@pytest.mark.parametrize("target", ("binary", "random", "equal")) +def test_ohe_equivalence(min_samples_leaf, n_unique_categories, target): + # Make sure that native categorical splits are equivalent to using a OHE, + # when given enough depth + + rng = np.random.RandomState(0) + n_samples = 10_000 + X_binned = rng.randint(0, n_unique_categories, size=(n_samples, 1), dtype=np.uint8) + + X_ohe = OneHotEncoder(sparse_output=False).fit_transform(X_binned) + X_ohe = np.asfortranarray(X_ohe).astype(np.uint8) + + if target == "equal": + gradients = X_binned.reshape(-1) + elif target == "binary": + gradients = (X_binned % 2).reshape(-1) + else: + gradients = rng.randn(n_samples) + gradients = gradients.astype(G_H_DTYPE) + + hessians = np.ones(shape=1, dtype=G_H_DTYPE) + + grower_params = { + "min_samples_leaf": min_samples_leaf, + "max_depth": None, + "max_leaf_nodes": None, + } + + grower = TreeGrower( + X_binned, gradients, hessians, is_categorical=[True], **grower_params + ) + grower.grow() + # we pass undefined bin_thresholds because we won't use predict() + predictor = grower.make_predictor( + binning_thresholds=np.zeros((1, n_unique_categories)) + ) + preds = predictor.predict_binned( + X_binned, missing_values_bin_idx=255, n_threads=n_threads + ) + + grower_ohe = TreeGrower(X_ohe, gradients, hessians, **grower_params) + grower_ohe.grow() + predictor_ohe = grower_ohe.make_predictor( + binning_thresholds=np.zeros((X_ohe.shape[1], n_unique_categories)) + ) + preds_ohe = predictor_ohe.predict_binned( + X_ohe, missing_values_bin_idx=255, n_threads=n_threads + ) + + assert predictor.get_max_depth() <= predictor_ohe.get_max_depth() + if target == "binary" and n_unique_categories > 2: + # OHE needs more splits to achieve the same predictions + assert predictor.get_max_depth() < predictor_ohe.get_max_depth() + + np.testing.assert_allclose(preds, preds_ohe) + + +def test_grower_interaction_constraints(): + """Check that grower respects interaction constraints.""" + n_features = 6 + interaction_cst = [{0, 1}, {1, 2}, {3, 4, 5}] + n_samples = 10 + n_bins = 6 + root_feature_splits = [] + + def get_all_children(node): + res = [] + if node.is_leaf: + return res + for n in [node.left_child, node.right_child]: + res.append(n) + res.extend(get_all_children(n)) + return res + + for seed in range(20): + rng = np.random.RandomState(seed) + + X_binned = rng.randint( + 0, n_bins - 1, size=(n_samples, n_features), dtype=X_BINNED_DTYPE + ) + X_binned = np.asfortranarray(X_binned) + gradients = rng.normal(size=n_samples).astype(G_H_DTYPE) + hessians = np.ones(shape=1, dtype=G_H_DTYPE) + + grower = TreeGrower( + X_binned, + gradients, + hessians, + n_bins=n_bins, + min_samples_leaf=1, + interaction_cst=interaction_cst, + 
n_threads=n_threads, + ) + grower.grow() + + root_feature_idx = grower.root.split_info.feature_idx + root_feature_splits.append(root_feature_idx) + + feature_idx_to_constraint_set = { + 0: {0, 1}, + 1: {0, 1, 2}, + 2: {1, 2}, + 3: {3, 4, 5}, + 4: {3, 4, 5}, + 5: {3, 4, 5}, + } + + root_constraint_set = feature_idx_to_constraint_set[root_feature_idx] + for node in (grower.root.left_child, grower.root.right_child): + # Root's children's allowed_features must be the root's constraints set. + assert_array_equal(node.allowed_features, list(root_constraint_set)) + for node in get_all_children(grower.root): + if node.is_leaf: + continue + # Ensure that each node uses a subset of features of its parent node. + parent_interaction_cst_indices = set(node.interaction_cst_indices) + right_interactions_cst_indices = set( + node.right_child.interaction_cst_indices + ) + left_interactions_cst_indices = set(node.left_child.interaction_cst_indices) + + assert right_interactions_cst_indices.issubset( + parent_interaction_cst_indices + ) + assert left_interactions_cst_indices.issubset( + parent_interaction_cst_indices + ) + # The features used for split must have been present in the root's + # constraint set. + assert node.split_info.feature_idx in root_constraint_set + + # Make sure that every feature is used at least once as split for the root node. + assert ( + len(set(root_feature_splits)) + == len(set().union(*interaction_cst)) + == n_features + ) diff --git a/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/test_histogram.py b/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/test_histogram.py new file mode 100644 index 0000000000000000000000000000000000000000..22375c7d4ea2c378bf7a45ad619f92c187d40984 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/test_histogram.py @@ -0,0 +1,239 @@ +import numpy as np +import pytest +from numpy.testing import assert_allclose, assert_array_equal + +from sklearn.ensemble._hist_gradient_boosting.common import ( + G_H_DTYPE, + HISTOGRAM_DTYPE, + X_BINNED_DTYPE, +) +from sklearn.ensemble._hist_gradient_boosting.histogram import ( + _build_histogram, + _build_histogram_naive, + _build_histogram_no_hessian, + _build_histogram_root, + _build_histogram_root_no_hessian, + _subtract_histograms, +) + + +@pytest.mark.parametrize("build_func", [_build_histogram_naive, _build_histogram]) +def test_build_histogram(build_func): + binned_feature = np.array([0, 2, 0, 1, 2, 0, 2, 1], dtype=X_BINNED_DTYPE) + + # Small sample_indices (below unrolling threshold) + ordered_gradients = np.array([0, 1, 3], dtype=G_H_DTYPE) + ordered_hessians = np.array([1, 1, 2], dtype=G_H_DTYPE) + + sample_indices = np.array([0, 2, 3], dtype=np.uint32) + hist = np.zeros((1, 3), dtype=HISTOGRAM_DTYPE) + build_func( + 0, sample_indices, binned_feature, ordered_gradients, ordered_hessians, hist + ) + hist = hist[0] + assert_array_equal(hist["count"], [2, 1, 0]) + assert_allclose(hist["sum_gradients"], [1, 3, 0]) + assert_allclose(hist["sum_hessians"], [2, 2, 0]) + + # Larger sample_indices (above unrolling threshold) + sample_indices = np.array([0, 2, 3, 6, 7], dtype=np.uint32) + ordered_gradients = np.array([0, 1, 3, 0, 1], dtype=G_H_DTYPE) + ordered_hessians = np.array([1, 1, 2, 1, 0], dtype=G_H_DTYPE) + + hist = np.zeros((1, 3), dtype=HISTOGRAM_DTYPE) + build_func( + 0, sample_indices, binned_feature, ordered_gradients, ordered_hessians, hist + ) + hist = hist[0] + 
assert_array_equal(hist["count"], [2, 2, 1]) + assert_allclose(hist["sum_gradients"], [1, 4, 0]) + assert_allclose(hist["sum_hessians"], [2, 2, 1]) + + +def test_histogram_sample_order_independence(): + # Make sure the order of the samples has no impact on the histogram + # computations + rng = np.random.RandomState(42) + n_sub_samples = 100 + n_samples = 1000 + n_bins = 256 + + binned_feature = rng.randint(0, n_bins - 1, size=n_samples, dtype=X_BINNED_DTYPE) + sample_indices = rng.choice( + np.arange(n_samples, dtype=np.uint32), n_sub_samples, replace=False + ) + ordered_gradients = rng.randn(n_sub_samples).astype(G_H_DTYPE) + hist_gc = np.zeros((1, n_bins), dtype=HISTOGRAM_DTYPE) + _build_histogram_no_hessian( + 0, sample_indices, binned_feature, ordered_gradients, hist_gc + ) + + ordered_hessians = rng.exponential(size=n_sub_samples).astype(G_H_DTYPE) + hist_ghc = np.zeros((1, n_bins), dtype=HISTOGRAM_DTYPE) + _build_histogram( + 0, sample_indices, binned_feature, ordered_gradients, ordered_hessians, hist_ghc + ) + + permutation = rng.permutation(n_sub_samples) + hist_gc_perm = np.zeros((1, n_bins), dtype=HISTOGRAM_DTYPE) + _build_histogram_no_hessian( + 0, + sample_indices[permutation], + binned_feature, + ordered_gradients[permutation], + hist_gc_perm, + ) + + hist_ghc_perm = np.zeros((1, n_bins), dtype=HISTOGRAM_DTYPE) + _build_histogram( + 0, + sample_indices[permutation], + binned_feature, + ordered_gradients[permutation], + ordered_hessians[permutation], + hist_ghc_perm, + ) + + hist_gc = hist_gc[0] + hist_ghc = hist_ghc[0] + hist_gc_perm = hist_gc_perm[0] + hist_ghc_perm = hist_ghc_perm[0] + + assert_allclose(hist_gc["sum_gradients"], hist_gc_perm["sum_gradients"]) + assert_array_equal(hist_gc["count"], hist_gc_perm["count"]) + + assert_allclose(hist_ghc["sum_gradients"], hist_ghc_perm["sum_gradients"]) + assert_allclose(hist_ghc["sum_hessians"], hist_ghc_perm["sum_hessians"]) + assert_array_equal(hist_ghc["count"], hist_ghc_perm["count"]) + + +@pytest.mark.parametrize("constant_hessian", [True, False]) +def test_unrolled_equivalent_to_naive(constant_hessian): + # Make sure the different unrolled histogram computations give the same + # results as the naive one. 
+ rng = np.random.RandomState(42) + n_samples = 10 + n_bins = 5 + sample_indices = np.arange(n_samples).astype(np.uint32) + binned_feature = rng.randint(0, n_bins - 1, size=n_samples, dtype=np.uint8) + ordered_gradients = rng.randn(n_samples).astype(G_H_DTYPE) + if constant_hessian: + ordered_hessians = np.ones(n_samples, dtype=G_H_DTYPE) + else: + ordered_hessians = rng.lognormal(size=n_samples).astype(G_H_DTYPE) + + hist_gc_root = np.zeros((1, n_bins), dtype=HISTOGRAM_DTYPE) + hist_ghc_root = np.zeros((1, n_bins), dtype=HISTOGRAM_DTYPE) + hist_gc = np.zeros((1, n_bins), dtype=HISTOGRAM_DTYPE) + hist_ghc = np.zeros((1, n_bins), dtype=HISTOGRAM_DTYPE) + hist_naive = np.zeros((1, n_bins), dtype=HISTOGRAM_DTYPE) + + _build_histogram_root_no_hessian(0, binned_feature, ordered_gradients, hist_gc_root) + _build_histogram_root( + 0, binned_feature, ordered_gradients, ordered_hessians, hist_ghc_root + ) + _build_histogram_no_hessian( + 0, sample_indices, binned_feature, ordered_gradients, hist_gc + ) + _build_histogram( + 0, sample_indices, binned_feature, ordered_gradients, ordered_hessians, hist_ghc + ) + _build_histogram_naive( + 0, + sample_indices, + binned_feature, + ordered_gradients, + ordered_hessians, + hist_naive, + ) + + hist_naive = hist_naive[0] + hist_gc_root = hist_gc_root[0] + hist_ghc_root = hist_ghc_root[0] + hist_gc = hist_gc[0] + hist_ghc = hist_ghc[0] + for hist in (hist_gc_root, hist_ghc_root, hist_gc, hist_ghc): + assert_array_equal(hist["count"], hist_naive["count"]) + assert_allclose(hist["sum_gradients"], hist_naive["sum_gradients"]) + for hist in (hist_ghc_root, hist_ghc): + assert_allclose(hist["sum_hessians"], hist_naive["sum_hessians"]) + for hist in (hist_gc_root, hist_gc): + assert_array_equal(hist["sum_hessians"], np.zeros(n_bins)) + + +@pytest.mark.parametrize("constant_hessian", [True, False]) +def test_hist_subtraction(constant_hessian): + # Make sure the histogram subtraction trick gives the same result as the + # classical method. 
+ rng = np.random.RandomState(42) + n_samples = 10 + n_bins = 5 + sample_indices = np.arange(n_samples).astype(np.uint32) + binned_feature = rng.randint(0, n_bins - 1, size=n_samples, dtype=np.uint8) + ordered_gradients = rng.randn(n_samples).astype(G_H_DTYPE) + if constant_hessian: + ordered_hessians = np.ones(n_samples, dtype=G_H_DTYPE) + else: + ordered_hessians = rng.lognormal(size=n_samples).astype(G_H_DTYPE) + + hist_parent = np.zeros((1, n_bins), dtype=HISTOGRAM_DTYPE) + if constant_hessian: + _build_histogram_no_hessian( + 0, sample_indices, binned_feature, ordered_gradients, hist_parent + ) + else: + _build_histogram( + 0, + sample_indices, + binned_feature, + ordered_gradients, + ordered_hessians, + hist_parent, + ) + + mask = rng.randint(0, 2, n_samples).astype(bool) + + sample_indices_left = sample_indices[mask] + ordered_gradients_left = ordered_gradients[mask] + ordered_hessians_left = ordered_hessians[mask] + hist_left = np.zeros((1, n_bins), dtype=HISTOGRAM_DTYPE) + if constant_hessian: + _build_histogram_no_hessian( + 0, sample_indices_left, binned_feature, ordered_gradients_left, hist_left + ) + else: + _build_histogram( + 0, + sample_indices_left, + binned_feature, + ordered_gradients_left, + ordered_hessians_left, + hist_left, + ) + + sample_indices_right = sample_indices[~mask] + ordered_gradients_right = ordered_gradients[~mask] + ordered_hessians_right = ordered_hessians[~mask] + hist_right = np.zeros((1, n_bins), dtype=HISTOGRAM_DTYPE) + if constant_hessian: + _build_histogram_no_hessian( + 0, sample_indices_right, binned_feature, ordered_gradients_right, hist_right + ) + else: + _build_histogram( + 0, + sample_indices_right, + binned_feature, + ordered_gradients_right, + ordered_hessians_right, + hist_right, + ) + + hist_left_sub = np.copy(hist_parent) + hist_right_sub = np.copy(hist_parent) + _subtract_histograms(0, n_bins, hist_left_sub, hist_right) + _subtract_histograms(0, n_bins, hist_right_sub, hist_left) + + for key in ("count", "sum_hessians", "sum_gradients"): + assert_allclose(hist_left[key], hist_left_sub[key], rtol=1e-6) + assert_allclose(hist_right[key], hist_right_sub[key], rtol=1e-6) diff --git a/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/test_monotonic_contraints.py b/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/test_monotonic_contraints.py new file mode 100644 index 0000000000000000000000000000000000000000..7782b5b32eb6875ceaea2c566cb5de769ee1e74b --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/test_monotonic_contraints.py @@ -0,0 +1,435 @@ +import re + +import numpy as np +import pytest + +from sklearn.ensemble import ( + HistGradientBoostingClassifier, + HistGradientBoostingRegressor, +) +from sklearn.ensemble._hist_gradient_boosting.common import ( + G_H_DTYPE, + X_BINNED_DTYPE, + MonotonicConstraint, +) +from sklearn.ensemble._hist_gradient_boosting.grower import TreeGrower +from sklearn.ensemble._hist_gradient_boosting.histogram import HistogramBuilder +from sklearn.ensemble._hist_gradient_boosting.splitting import ( + Splitter, + compute_node_value, +) +from sklearn.utils._openmp_helpers import _openmp_effective_n_threads +from sklearn.utils._testing import _convert_container + +n_threads = _openmp_effective_n_threads() + + +def is_increasing(a): + return (np.diff(a) >= 0.0).all() + + +def is_decreasing(a): + return (np.diff(a) <= 0.0).all() + + +def assert_leaves_values_monotonic(predictor, monotonic_cst): + # make 
sure leaves values (from left to right) are either all increasing + # or all decreasing (or neither) depending on the monotonic constraint. + nodes = predictor.nodes + + def get_leaves_values(): + """get leaves values from left to right""" + values = [] + + def depth_first_collect_leaf_values(node_idx): + node = nodes[node_idx] + if node["is_leaf"]: + values.append(node["value"]) + return + depth_first_collect_leaf_values(node["left"]) + depth_first_collect_leaf_values(node["right"]) + + depth_first_collect_leaf_values(0) # start at root (0) + return values + + values = get_leaves_values() + + if monotonic_cst == MonotonicConstraint.NO_CST: + # some increasing, some decreasing + assert not is_increasing(values) and not is_decreasing(values) + elif monotonic_cst == MonotonicConstraint.POS: + # all increasing + assert is_increasing(values) + else: # NEG + # all decreasing + assert is_decreasing(values) + + +def assert_children_values_monotonic(predictor, monotonic_cst): + # Make sure sibling values respect the monotonic constraints. Left should + # be lower (resp greater) than right child if constraint is POS (resp. + # NEG). + # Note that this property alone isn't enough to ensure full monotonicity, + # since we also need to guarantee that all the descendants of the left + # child won't be greater (resp. lower) than the right child, or its + # descendants. That's why we need to bound the predicted values (this is + # tested in assert_children_values_bounded) + nodes = predictor.nodes + left_lower = [] + left_greater = [] + for node in nodes: + if node["is_leaf"]: + continue + + left_idx = node["left"] + right_idx = node["right"] + + if nodes[left_idx]["value"] < nodes[right_idx]["value"]: + left_lower.append(node) + elif nodes[left_idx]["value"] > nodes[right_idx]["value"]: + left_greater.append(node) + + if monotonic_cst == MonotonicConstraint.NO_CST: + assert left_lower and left_greater + elif monotonic_cst == MonotonicConstraint.POS: + assert left_lower and not left_greater + else: # NEG + assert not left_lower and left_greater + + +def assert_children_values_bounded(grower, monotonic_cst): + # Make sure that the values of the children of a node are bounded by the + # middle value between that node and its sibling (if there is a monotonic + # constraint).
+ # As a bonus, we also check that the siblings values are properly ordered + # which is slightly redundant with assert_children_values_monotonic (but + # this check is done on the grower nodes whereas + # assert_children_values_monotonic is done on the predictor nodes) + + if monotonic_cst == MonotonicConstraint.NO_CST: + return + + def recursively_check_children_node_values(node, right_sibling=None): + if node.is_leaf: + return + if right_sibling is not None: + middle = (node.value + right_sibling.value) / 2 + if monotonic_cst == MonotonicConstraint.POS: + assert node.left_child.value <= node.right_child.value <= middle + if not right_sibling.is_leaf: + assert ( + middle + <= right_sibling.left_child.value + <= right_sibling.right_child.value + ) + else: # NEG + assert node.left_child.value >= node.right_child.value >= middle + if not right_sibling.is_leaf: + assert ( + middle + >= right_sibling.left_child.value + >= right_sibling.right_child.value + ) + + recursively_check_children_node_values( + node.left_child, right_sibling=node.right_child + ) + recursively_check_children_node_values(node.right_child) + + recursively_check_children_node_values(grower.root) + + +@pytest.mark.parametrize("seed", range(3)) +@pytest.mark.parametrize( + "monotonic_cst", + ( + MonotonicConstraint.NO_CST, + MonotonicConstraint.POS, + MonotonicConstraint.NEG, + ), +) +def test_nodes_values(monotonic_cst, seed): + # Build a single tree with only one feature, and make sure the nodes + # values respect the monotonic constraints. + + # Considering the following tree with a monotonic POS constraint, we + # should have: + # + # root + # / \ + # 5 10 # middle = 7.5 + # / \ / \ + # a b c d + # + # a <= b and c <= d (assert_children_values_monotonic) + # a, b <= middle <= c, d (assert_children_values_bounded) + # a <= b <= c <= d (assert_leaves_values_monotonic) + # + # The last one is a consequence of the others, but can't hurt to check + + rng = np.random.RandomState(seed) + n_samples = 1000 + n_features = 1 + X_binned = rng.randint(0, 255, size=(n_samples, n_features), dtype=np.uint8) + X_binned = np.asfortranarray(X_binned) + + gradients = rng.normal(size=n_samples).astype(G_H_DTYPE) + hessians = np.ones(shape=1, dtype=G_H_DTYPE) + + grower = TreeGrower( + X_binned, gradients, hessians, monotonic_cst=[monotonic_cst], shrinkage=0.1 + ) + grower.grow() + + # grow() will shrink the leaves values at the very end. For our comparison + # tests, we need to revert the shrinkage of the leaves, else we would + # compare the value of a leaf (shrunk) with a node (not shrunk) and the + # test would not be correct. + for leave in grower.finalized_leaves: + leave.value /= grower.shrinkage + + # We pass undefined binning_thresholds because we won't use predict anyway + predictor = grower.make_predictor( + binning_thresholds=np.zeros((X_binned.shape[1], X_binned.max() + 1)) + ) + + # The consistency of the bounds can only be checked on the tree grower + # as the node bounds are not copied into the predictor tree. The + # consistency checks on the values of node children and leaves can be + # done either on the grower tree or on the predictor tree. We only + # do those checks on the predictor tree as the latter is derived from + # the former. 
+ assert_children_values_monotonic(predictor, monotonic_cst) + assert_children_values_bounded(grower, monotonic_cst) + assert_leaves_values_monotonic(predictor, monotonic_cst) + + +@pytest.mark.parametrize("use_feature_names", (True, False)) +def test_predictions(global_random_seed, use_feature_names): + # Train a model with a POS constraint on the first feature and a NEG + # constraint on the second feature, and make sure the constraints are + # respected by checking the predictions. + # test adapted from lightgbm's test_monotone_constraint(), itself inspired + # by https://xgboost.readthedocs.io/en/latest/tutorials/monotonic.html + + rng = np.random.RandomState(global_random_seed) + + n_samples = 1000 + f_0 = rng.rand(n_samples) # positive correlation with y + f_1 = rng.rand(n_samples) # negative correlation with y + X = np.c_[f_0, f_1] + columns_name = ["f_0", "f_1"] + constructor_name = "dataframe" if use_feature_names else "array" + X = _convert_container(X, constructor_name, columns_name=columns_name) + + noise = rng.normal(loc=0.0, scale=0.01, size=n_samples) + y = 5 * f_0 + np.sin(10 * np.pi * f_0) - 5 * f_1 - np.cos(10 * np.pi * f_1) + noise + + if use_feature_names: + monotonic_cst = {"f_0": +1, "f_1": -1} + else: + monotonic_cst = [+1, -1] + + gbdt = HistGradientBoostingRegressor(monotonic_cst=monotonic_cst) + gbdt.fit(X, y) + + linspace = np.linspace(0, 1, 100) + sin = np.sin(linspace) + constant = np.full_like(linspace, fill_value=0.5) + + # We now assert the predictions properly respect the constraints, on each + # feature. When testing for a feature we need to set the other one to a + # constant, because the monotonic constraints are only an "all else being + # equal" type of constraints: + # a constraint on the first feature only means that + # x0 < x0' => f(x0, x1) < f(x0', x1) + # while x1 stays constant. + # The constraint does not guarantee that + # x0 < x0' => f(x0, x1) < f(x0', x1') + + # First feature (POS) + # assert pred is all increasing when f_0 is all increasing + X = np.c_[linspace, constant] + X = _convert_container(X, constructor_name, columns_name=columns_name) + pred = gbdt.predict(X) + assert is_increasing(pred) + # assert pred actually follows the variations of f_0 + X = np.c_[sin, constant] + X = _convert_container(X, constructor_name, columns_name=columns_name) + pred = gbdt.predict(X) + assert np.all((np.diff(pred) >= 0) == (np.diff(sin) >= 0)) + + # Second feature (NEG) + # assert pred is all decreasing when f_1 is all increasing + X = np.c_[constant, linspace] + X = _convert_container(X, constructor_name, columns_name=columns_name) + pred = gbdt.predict(X) + assert is_decreasing(pred) + # assert pred actually follows the inverse variations of f_1 + X = np.c_[constant, sin] + X = _convert_container(X, constructor_name, columns_name=columns_name) + pred = gbdt.predict(X) + assert ((np.diff(pred) <= 0) == (np.diff(sin) >= 0)).all() + + +def test_input_error(): + X = [[1, 2], [2, 3], [3, 4]] + y = [0, 1, 2] + + gbdt = HistGradientBoostingRegressor(monotonic_cst=[1, 0, -1]) + with pytest.raises( + ValueError, match=re.escape("monotonic_cst has shape (3,) but the input data") + ): + gbdt.fit(X, y) + + for monotonic_cst in ([1, 3], [1, -3], [0.3, -0.7]): + gbdt = HistGradientBoostingRegressor(monotonic_cst=monotonic_cst) + expected_msg = re.escape( + "must be an array-like of -1, 0 or 1.
Observed values:" + ) + with pytest.raises(ValueError, match=expected_msg): + gbdt.fit(X, y) + + gbdt = HistGradientBoostingClassifier(monotonic_cst=[0, 1]) + with pytest.raises( + ValueError, + match="monotonic constraints are not supported for multiclass classification", + ): + gbdt.fit(X, y) + + +def test_input_error_related_to_feature_names(): + pd = pytest.importorskip("pandas") + X = pd.DataFrame({"a": [0, 1, 2], "b": [0, 1, 2]}) + y = np.array([0, 1, 0]) + + monotonic_cst = {"d": 1, "a": 1, "c": -1} + gbdt = HistGradientBoostingRegressor(monotonic_cst=monotonic_cst) + expected_msg = re.escape( + "monotonic_cst contains 2 unexpected feature names: ['c', 'd']." + ) + with pytest.raises(ValueError, match=expected_msg): + gbdt.fit(X, y) + + monotonic_cst = {k: 1 for k in "abcdefghijklmnopqrstuvwxyz"} + gbdt = HistGradientBoostingRegressor(monotonic_cst=monotonic_cst) + expected_msg = re.escape( + "monotonic_cst contains 24 unexpected feature names: " + "['c', 'd', 'e', 'f', 'g', '...']." + ) + with pytest.raises(ValueError, match=expected_msg): + gbdt.fit(X, y) + + monotonic_cst = {"a": 1} + gbdt = HistGradientBoostingRegressor(monotonic_cst=monotonic_cst) + expected_msg = re.escape( + "HistGradientBoostingRegressor was not fitted on data with feature " + "names. Pass monotonic_cst as an integer array instead." + ) + with pytest.raises(ValueError, match=expected_msg): + gbdt.fit(X.values, y) + + monotonic_cst = {"b": -1, "a": "+"} + gbdt = HistGradientBoostingRegressor(monotonic_cst=monotonic_cst) + expected_msg = re.escape("monotonic_cst['a'] must be either -1, 0 or 1. Got '+'.") + with pytest.raises(ValueError, match=expected_msg): + gbdt.fit(X, y) + + +def test_bounded_value_min_gain_to_split(): + # The purpose of this test is to show that when computing the gain at a + # given split, the value of the current node should be properly bounded to + # respect the monotonic constraints, because it strongly interacts with + # min_gain_to_split. We build a simple example where gradients are [1, 1, + # 100, 1, 1] (hessians are all ones). The best split happens on the 3rd + # bin, and depending on whether the value of the node is bounded or not, + # the min_gain_to_split constraint is or isn't satisfied. 
+ l2_regularization = 0 + min_hessian_to_split = 0 + min_samples_leaf = 1 + n_bins = n_samples = 5 + X_binned = np.arange(n_samples).reshape(-1, 1).astype(X_BINNED_DTYPE) + sample_indices = np.arange(n_samples, dtype=np.uint32) + all_hessians = np.ones(n_samples, dtype=G_H_DTYPE) + all_gradients = np.array([1, 1, 100, 1, 1], dtype=G_H_DTYPE) + sum_gradients = all_gradients.sum() + sum_hessians = all_hessians.sum() + hessians_are_constant = False + + builder = HistogramBuilder( + X_binned, n_bins, all_gradients, all_hessians, hessians_are_constant, n_threads + ) + n_bins_non_missing = np.array([n_bins - 1] * X_binned.shape[1], dtype=np.uint32) + has_missing_values = np.array([False] * X_binned.shape[1], dtype=np.uint8) + monotonic_cst = np.array( + [MonotonicConstraint.NO_CST] * X_binned.shape[1], dtype=np.int8 + ) + is_categorical = np.zeros_like(monotonic_cst, dtype=np.uint8) + missing_values_bin_idx = n_bins - 1 + children_lower_bound, children_upper_bound = -np.inf, np.inf + + min_gain_to_split = 2000 + splitter = Splitter( + X_binned, + n_bins_non_missing, + missing_values_bin_idx, + has_missing_values, + is_categorical, + monotonic_cst, + l2_regularization, + min_hessian_to_split, + min_samples_leaf, + min_gain_to_split, + hessians_are_constant, + ) + + histograms = builder.compute_histograms_brute(sample_indices) + + # Since the gradient array is [1, 1, 100, 1, 1] + # the max possible gain happens on the 3rd bin (or equivalently in the 2nd) + # and is equal to about 1307, which is less than min_gain_to_split = 2000, so + # the node is considered unsplittable (gain = -1) + current_lower_bound, current_upper_bound = -np.inf, np.inf + value = compute_node_value( + sum_gradients, + sum_hessians, + current_lower_bound, + current_upper_bound, + l2_regularization, + ) + # the unbounded value is equal to -sum_gradients / sum_hessians + assert value == pytest.approx(-104 / 5) + split_info = splitter.find_node_split( + n_samples, + histograms, + sum_gradients, + sum_hessians, + value, + lower_bound=children_lower_bound, + upper_bound=children_upper_bound, + ) + assert split_info.gain == -1 # min_gain_to_split not respected + + # here again the max possible gain is on the 3rd bin but we now cap the + # value of the node into [-10, inf]. + # This means the gain is now about 2430 which is more than the + # min_gain_to_split constraint.
+ current_lower_bound, current_upper_bound = -10, np.inf + value = compute_node_value( + sum_gradients, + sum_hessians, + current_lower_bound, + current_upper_bound, + l2_regularization, + ) + assert value == -10 + split_info = splitter.find_node_split( + n_samples, + histograms, + sum_gradients, + sum_hessians, + value, + lower_bound=children_lower_bound, + upper_bound=children_upper_bound, + ) + assert split_info.gain > min_gain_to_split diff --git a/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/test_predictor.py b/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/test_predictor.py new file mode 100644 index 0000000000000000000000000000000000000000..3c3c9ae81bac2d498c460bfb5f2173f8c48693d1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/test_predictor.py @@ -0,0 +1,187 @@ +import numpy as np +import pytest +from numpy.testing import assert_allclose + +from sklearn.datasets import make_regression +from sklearn.ensemble._hist_gradient_boosting._bitset import ( + set_bitset_memoryview, + set_raw_bitset_from_binned_bitset, +) +from sklearn.ensemble._hist_gradient_boosting.binning import _BinMapper +from sklearn.ensemble._hist_gradient_boosting.common import ( + ALMOST_INF, + G_H_DTYPE, + PREDICTOR_RECORD_DTYPE, + X_BINNED_DTYPE, + X_BITSET_INNER_DTYPE, + X_DTYPE, +) +from sklearn.ensemble._hist_gradient_boosting.grower import TreeGrower +from sklearn.ensemble._hist_gradient_boosting.predictor import TreePredictor +from sklearn.metrics import r2_score +from sklearn.model_selection import train_test_split +from sklearn.utils._openmp_helpers import _openmp_effective_n_threads + +n_threads = _openmp_effective_n_threads() + + +@pytest.mark.parametrize("n_bins", [200, 256]) +def test_regression_dataset(n_bins): + X, y = make_regression( + n_samples=500, n_features=10, n_informative=5, random_state=42 + ) + X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42) + + mapper = _BinMapper(n_bins=n_bins, random_state=42) + X_train_binned = mapper.fit_transform(X_train) + + # Init gradients and hessians to that of least squares loss + gradients = -y_train.astype(G_H_DTYPE) + hessians = np.ones(1, dtype=G_H_DTYPE) + + min_samples_leaf = 10 + max_leaf_nodes = 30 + grower = TreeGrower( + X_train_binned, + gradients, + hessians, + min_samples_leaf=min_samples_leaf, + max_leaf_nodes=max_leaf_nodes, + n_bins=n_bins, + n_bins_non_missing=mapper.n_bins_non_missing_, + ) + grower.grow() + + predictor = grower.make_predictor(binning_thresholds=mapper.bin_thresholds_) + + known_cat_bitsets = np.zeros((0, 8), dtype=X_BITSET_INNER_DTYPE) + f_idx_map = np.zeros(0, dtype=np.uint32) + + y_pred_train = predictor.predict(X_train, known_cat_bitsets, f_idx_map, n_threads) + assert r2_score(y_train, y_pred_train) > 0.82 + + y_pred_test = predictor.predict(X_test, known_cat_bitsets, f_idx_map, n_threads) + assert r2_score(y_test, y_pred_test) > 0.67 + + +@pytest.mark.parametrize( + "num_threshold, expected_predictions", + [ + (-np.inf, [0, 1, 1, 1]), + (10, [0, 0, 1, 1]), + (20, [0, 0, 0, 1]), + (ALMOST_INF, [0, 0, 0, 1]), + (np.inf, [0, 0, 0, 0]), + ], +) +def test_infinite_values_and_thresholds(num_threshold, expected_predictions): + # Make sure infinite values and infinite thresholds are handled properly. + # In particular, if a value is +inf and the threshold is ALMOST_INF the + # sample should go to the right child. 
If the threshold is inf (split on + # nan), the +inf sample will go to the left child. + + X = np.array([-np.inf, 10, 20, np.inf]).reshape(-1, 1) + nodes = np.zeros(3, dtype=PREDICTOR_RECORD_DTYPE) + + # We just construct a simple tree with 1 root and 2 children + # parent node + nodes[0]["left"] = 1 + nodes[0]["right"] = 2 + nodes[0]["feature_idx"] = 0 + nodes[0]["num_threshold"] = num_threshold + + # left child + nodes[1]["is_leaf"] = True + nodes[1]["value"] = 0 + + # right child + nodes[2]["is_leaf"] = True + nodes[2]["value"] = 1 + + binned_cat_bitsets = np.zeros((0, 8), dtype=X_BITSET_INNER_DTYPE) + raw_categorical_bitsets = np.zeros((0, 8), dtype=X_BITSET_INNER_DTYPE) + known_cat_bitset = np.zeros((0, 8), dtype=X_BITSET_INNER_DTYPE) + f_idx_map = np.zeros(0, dtype=np.uint32) + + predictor = TreePredictor(nodes, binned_cat_bitsets, raw_categorical_bitsets) + predictions = predictor.predict(X, known_cat_bitset, f_idx_map, n_threads) + + assert np.all(predictions == expected_predictions) + + +@pytest.mark.parametrize( + "bins_go_left, expected_predictions", + [ + ([0, 3, 4, 6], [1, 0, 0, 1, 1, 0]), + ([0, 1, 2, 6], [1, 1, 1, 0, 0, 0]), + ([3, 5, 6], [0, 0, 0, 1, 0, 1]), + ], +) +def test_categorical_predictor(bins_go_left, expected_predictions): + # Test predictor outputs are correct with categorical features + + X_binned = np.array([[0, 1, 2, 3, 4, 5]], dtype=X_BINNED_DTYPE).T + categories = np.array([2, 5, 6, 8, 10, 15], dtype=X_DTYPE) + + bins_go_left = np.array(bins_go_left, dtype=X_BINNED_DTYPE) + + # We just construct a simple tree with 1 root and 2 children + # parent node + nodes = np.zeros(3, dtype=PREDICTOR_RECORD_DTYPE) + nodes[0]["left"] = 1 + nodes[0]["right"] = 2 + nodes[0]["feature_idx"] = 0 + nodes[0]["is_categorical"] = True + nodes[0]["missing_go_to_left"] = True + + # left child + nodes[1]["is_leaf"] = True + nodes[1]["value"] = 1 + + # right child + nodes[2]["is_leaf"] = True + nodes[2]["value"] = 0 + + binned_cat_bitsets = np.zeros((1, 8), dtype=X_BITSET_INNER_DTYPE) + raw_categorical_bitsets = np.zeros((1, 8), dtype=X_BITSET_INNER_DTYPE) + for go_left in bins_go_left: + set_bitset_memoryview(binned_cat_bitsets[0], go_left) + + set_raw_bitset_from_binned_bitset( + raw_categorical_bitsets[0], binned_cat_bitsets[0], categories + ) + + predictor = TreePredictor(nodes, binned_cat_bitsets, raw_categorical_bitsets) + + # Check binned data gives correct predictions + prediction_binned = predictor.predict_binned( + X_binned, missing_values_bin_idx=6, n_threads=n_threads + ) + assert_allclose(prediction_binned, expected_predictions) + + # manually construct bitset + known_cat_bitsets = np.zeros((1, 8), dtype=np.uint32) + known_cat_bitsets[0, 0] = np.sum(2**categories, dtype=np.uint32) + f_idx_map = np.array([0], dtype=np.uint32) + + # Check with un-binned data + predictions = predictor.predict( + categories.reshape(-1, 1), known_cat_bitsets, f_idx_map, n_threads + ) + assert_allclose(predictions, expected_predictions) + + # Check missing goes left because missing_values_bin_idx=6 + X_binned_missing = np.array([[6]], dtype=X_BINNED_DTYPE).T + predictions = predictor.predict_binned( + X_binned_missing, missing_values_bin_idx=6, n_threads=n_threads + ) + assert_allclose(predictions, [1]) + + # missing and unknown go left + predictions = predictor.predict( + np.array([[np.nan, 17]], dtype=X_DTYPE).T, + known_cat_bitsets, + f_idx_map, + n_threads, + ) + assert_allclose(predictions, [1, 1]) diff --git 
a/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/test_splitting.py b/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/test_splitting.py new file mode 100644 index 0000000000000000000000000000000000000000..388697340e08b545be766c6d46cf7362371bc258 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/test_splitting.py @@ -0,0 +1,1070 @@ +import numpy as np +import pytest +from numpy.testing import assert_array_equal + +from sklearn.ensemble._hist_gradient_boosting.common import ( + G_H_DTYPE, + HISTOGRAM_DTYPE, + X_BINNED_DTYPE, + MonotonicConstraint, +) +from sklearn.ensemble._hist_gradient_boosting.histogram import HistogramBuilder +from sklearn.ensemble._hist_gradient_boosting.splitting import ( + Splitter, + compute_node_value, +) +from sklearn.utils._openmp_helpers import _openmp_effective_n_threads +from sklearn.utils._testing import skip_if_32bit + +n_threads = _openmp_effective_n_threads() + + +@pytest.mark.parametrize("n_bins", [3, 32, 256]) +def test_histogram_split(n_bins): + rng = np.random.RandomState(42) + feature_idx = 0 + l2_regularization = 0 + min_hessian_to_split = 1e-3 + min_samples_leaf = 1 + min_gain_to_split = 0.0 + X_binned = np.asfortranarray( + rng.randint(0, n_bins - 1, size=(int(1e4), 1)), dtype=X_BINNED_DTYPE + ) + binned_feature = X_binned.T[feature_idx] + sample_indices = np.arange(binned_feature.shape[0], dtype=np.uint32) + ordered_hessians = np.ones_like(binned_feature, dtype=G_H_DTYPE) + all_hessians = ordered_hessians + sum_hessians = all_hessians.sum() + hessians_are_constant = False + + for true_bin in range(1, n_bins - 2): + for sign in [-1, 1]: + ordered_gradients = np.full_like(binned_feature, sign, dtype=G_H_DTYPE) + ordered_gradients[binned_feature <= true_bin] *= -1 + all_gradients = ordered_gradients + sum_gradients = all_gradients.sum() + + builder = HistogramBuilder( + X_binned, + n_bins, + all_gradients, + all_hessians, + hessians_are_constant, + n_threads, + ) + n_bins_non_missing = np.array( + [n_bins - 1] * X_binned.shape[1], dtype=np.uint32 + ) + has_missing_values = np.array([False] * X_binned.shape[1], dtype=np.uint8) + monotonic_cst = np.array( + [MonotonicConstraint.NO_CST] * X_binned.shape[1], dtype=np.int8 + ) + is_categorical = np.zeros_like(monotonic_cst, dtype=np.uint8) + missing_values_bin_idx = n_bins - 1 + splitter = Splitter( + X_binned, + n_bins_non_missing, + missing_values_bin_idx, + has_missing_values, + is_categorical, + monotonic_cst, + l2_regularization, + min_hessian_to_split, + min_samples_leaf, + min_gain_to_split, + hessians_are_constant, + ) + + histograms = builder.compute_histograms_brute(sample_indices) + value = compute_node_value( + sum_gradients, sum_hessians, -np.inf, np.inf, l2_regularization + ) + split_info = splitter.find_node_split( + sample_indices.shape[0], histograms, sum_gradients, sum_hessians, value + ) + + assert split_info.bin_idx == true_bin + assert split_info.gain >= 0 + assert split_info.feature_idx == feature_idx + assert ( + split_info.n_samples_left + split_info.n_samples_right + == sample_indices.shape[0] + ) + # Constant hessian: 1. per sample. 
+ assert split_info.n_samples_left == split_info.sum_hessian_left + + +@skip_if_32bit +@pytest.mark.parametrize("constant_hessian", [True, False]) +def test_gradient_and_hessian_sanity(constant_hessian): + # This test checks that the values of gradients and hessians are + # consistent in different places: + # - in split_info: si.sum_gradient_left + si.sum_gradient_right must be + # equal to the gradient at the node. Same for hessians. + # - in the histograms: summing 'sum_gradients' over the bins must be + # constant across all features, and those sums must be equal to the + # node's gradient. Same for hessians. + + rng = np.random.RandomState(42) + + n_bins = 10 + n_features = 20 + n_samples = 500 + l2_regularization = 0.0 + min_hessian_to_split = 1e-3 + min_samples_leaf = 1 + min_gain_to_split = 0.0 + + X_binned = rng.randint( + 0, n_bins, size=(n_samples, n_features), dtype=X_BINNED_DTYPE + ) + X_binned = np.asfortranarray(X_binned) + sample_indices = np.arange(n_samples, dtype=np.uint32) + all_gradients = rng.randn(n_samples).astype(G_H_DTYPE) + sum_gradients = all_gradients.sum() + if constant_hessian: + all_hessians = np.ones(1, dtype=G_H_DTYPE) + sum_hessians = 1 * n_samples + else: + all_hessians = rng.lognormal(size=n_samples).astype(G_H_DTYPE) + sum_hessians = all_hessians.sum() + + builder = HistogramBuilder( + X_binned, n_bins, all_gradients, all_hessians, constant_hessian, n_threads + ) + n_bins_non_missing = np.array([n_bins - 1] * X_binned.shape[1], dtype=np.uint32) + has_missing_values = np.array([False] * X_binned.shape[1], dtype=np.uint8) + monotonic_cst = np.array( + [MonotonicConstraint.NO_CST] * X_binned.shape[1], dtype=np.int8 + ) + is_categorical = np.zeros_like(monotonic_cst, dtype=np.uint8) + missing_values_bin_idx = n_bins - 1 + splitter = Splitter( + X_binned, + n_bins_non_missing, + missing_values_bin_idx, + has_missing_values, + is_categorical, + monotonic_cst, + l2_regularization, + min_hessian_to_split, + min_samples_leaf, + min_gain_to_split, + constant_hessian, + ) + + hists_parent = builder.compute_histograms_brute(sample_indices) + value_parent = compute_node_value( + sum_gradients, sum_hessians, -np.inf, np.inf, l2_regularization + ) + si_parent = splitter.find_node_split( + n_samples, hists_parent, sum_gradients, sum_hessians, value_parent + ) + sample_indices_left, sample_indices_right, _ = splitter.split_indices( + si_parent, sample_indices + ) + + hists_left = builder.compute_histograms_brute(sample_indices_left) + value_left = compute_node_value( + si_parent.sum_gradient_left, + si_parent.sum_hessian_left, + -np.inf, + np.inf, + l2_regularization, + ) + hists_right = builder.compute_histograms_brute(sample_indices_right) + value_right = compute_node_value( + si_parent.sum_gradient_right, + si_parent.sum_hessian_right, + -np.inf, + np.inf, + l2_regularization, + ) + si_left = splitter.find_node_split( + n_samples, + hists_left, + si_parent.sum_gradient_left, + si_parent.sum_hessian_left, + value_left, + ) + si_right = splitter.find_node_split( + n_samples, + hists_right, + si_parent.sum_gradient_right, + si_parent.sum_hessian_right, + value_right, + ) + + # make sure that si.sum_gradient_left + si.sum_gradient_right have their + # expected value, same for hessians + for si, indices in ( + (si_parent, sample_indices), + (si_left, sample_indices_left), + (si_right, sample_indices_right), + ): + gradient = si.sum_gradient_right + si.sum_gradient_left + expected_gradient = all_gradients[indices].sum() + hessian = si.sum_hessian_right + 
si.sum_hessian_left + if constant_hessian: + expected_hessian = indices.shape[0] * all_hessians[0] + else: + expected_hessian = all_hessians[indices].sum() + + assert np.isclose(gradient, expected_gradient) + assert np.isclose(hessian, expected_hessian) + + # make sure sum of gradients in histograms are the same for all features, + # and make sure they're equal to their expected value + hists_parent = np.asarray(hists_parent, dtype=HISTOGRAM_DTYPE) + hists_left = np.asarray(hists_left, dtype=HISTOGRAM_DTYPE) + hists_right = np.asarray(hists_right, dtype=HISTOGRAM_DTYPE) + for hists, indices in ( + (hists_parent, sample_indices), + (hists_left, sample_indices_left), + (hists_right, sample_indices_right), + ): + # note: gradients and hessians have shape (n_features,), + # we're comparing them to *scalars*. This has the benefit of also + # making sure that all the entries are equal across features. + gradients = hists["sum_gradients"].sum(axis=1) # shape = (n_features,) + expected_gradient = all_gradients[indices].sum() # scalar + hessians = hists["sum_hessians"].sum(axis=1) + if constant_hessian: + # 0 is not the actual hessian, but it's not computed in this case + expected_hessian = 0.0 + else: + expected_hessian = all_hessians[indices].sum() + + assert np.allclose(gradients, expected_gradient) + assert np.allclose(hessians, expected_hessian) + + +def test_split_indices(): + # Check that split_indices returns the correct splits and that + # splitter.partition is consistent with what is returned. + rng = np.random.RandomState(421) + + n_bins = 5 + n_samples = 10 + l2_regularization = 0.0 + min_hessian_to_split = 1e-3 + min_samples_leaf = 1 + min_gain_to_split = 0.0 + + # split will happen on feature 1 and on bin 3 + X_binned = [ + [0, 0], + [0, 3], + [0, 4], + [0, 0], + [0, 0], + [0, 0], + [0, 0], + [0, 4], + [0, 0], + [0, 4], + ] + X_binned = np.asfortranarray(X_binned, dtype=X_BINNED_DTYPE) + sample_indices = np.arange(n_samples, dtype=np.uint32) + all_gradients = rng.randn(n_samples).astype(G_H_DTYPE) + all_hessians = np.ones(1, dtype=G_H_DTYPE) + sum_gradients = all_gradients.sum() + sum_hessians = 1 * n_samples + hessians_are_constant = True + + builder = HistogramBuilder( + X_binned, n_bins, all_gradients, all_hessians, hessians_are_constant, n_threads + ) + n_bins_non_missing = np.array([n_bins] * X_binned.shape[1], dtype=np.uint32) + has_missing_values = np.array([False] * X_binned.shape[1], dtype=np.uint8) + monotonic_cst = np.array( + [MonotonicConstraint.NO_CST] * X_binned.shape[1], dtype=np.int8 + ) + is_categorical = np.zeros_like(monotonic_cst, dtype=np.uint8) + missing_values_bin_idx = n_bins - 1 + splitter = Splitter( + X_binned, + n_bins_non_missing, + missing_values_bin_idx, + has_missing_values, + is_categorical, + monotonic_cst, + l2_regularization, + min_hessian_to_split, + min_samples_leaf, + min_gain_to_split, + hessians_are_constant, + ) + + assert np.all(sample_indices == splitter.partition) + + histograms = builder.compute_histograms_brute(sample_indices) + value = compute_node_value( + sum_gradients, sum_hessians, -np.inf, np.inf, l2_regularization + ) + si_root = splitter.find_node_split( + n_samples, histograms, sum_gradients, sum_hessians, value + ) + + # sanity checks for best split + assert si_root.feature_idx == 1 + assert si_root.bin_idx == 3 + + samples_left, samples_right, position_right = splitter.split_indices( + si_root, splitter.partition + ) + assert set(samples_left) == set([0, 1, 3, 4, 5, 6, 8]) + assert set(samples_right) == set([2, 7, 9]) + + 
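# split_indices() reorders splitter.partition in place: the left-child samples
# end up in partition[:position_right] and the right-child samples in
# partition[position_right:], which is what the two assertions below verify.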
assert list(samples_left) == list(splitter.partition[:position_right]) + assert list(samples_right) == list(splitter.partition[position_right:]) + + # Check that the resulting split indices sizes are consistent with the + # count statistics anticipated when looking for the best split. + assert samples_left.shape[0] == si_root.n_samples_left + assert samples_right.shape[0] == si_root.n_samples_right + + +def test_min_gain_to_split(): + # Try to split a pure node (all gradients are equal, same for hessians) + # with min_gain_to_split = 0 and make sure that the node is not split (best + # possible gain = -1). Note: before the strict inequality comparison, this + # test would fail because the node would be split with a gain of 0. + rng = np.random.RandomState(42) + l2_regularization = 0 + min_hessian_to_split = 0 + min_samples_leaf = 1 + min_gain_to_split = 0.0 + n_bins = 255 + n_samples = 100 + X_binned = np.asfortranarray( + rng.randint(0, n_bins, size=(n_samples, 1)), dtype=X_BINNED_DTYPE + ) + binned_feature = X_binned[:, 0] + sample_indices = np.arange(n_samples, dtype=np.uint32) + all_hessians = np.ones_like(binned_feature, dtype=G_H_DTYPE) + all_gradients = np.ones_like(binned_feature, dtype=G_H_DTYPE) + sum_gradients = all_gradients.sum() + sum_hessians = all_hessians.sum() + hessians_are_constant = False + + builder = HistogramBuilder( + X_binned, n_bins, all_gradients, all_hessians, hessians_are_constant, n_threads + ) + n_bins_non_missing = np.array([n_bins - 1] * X_binned.shape[1], dtype=np.uint32) + has_missing_values = np.array([False] * X_binned.shape[1], dtype=np.uint8) + monotonic_cst = np.array( + [MonotonicConstraint.NO_CST] * X_binned.shape[1], dtype=np.int8 + ) + is_categorical = np.zeros_like(monotonic_cst, dtype=np.uint8) + missing_values_bin_idx = n_bins - 1 + splitter = Splitter( + X_binned, + n_bins_non_missing, + missing_values_bin_idx, + has_missing_values, + is_categorical, + monotonic_cst, + l2_regularization, + min_hessian_to_split, + min_samples_leaf, + min_gain_to_split, + hessians_are_constant, + ) + + histograms = builder.compute_histograms_brute(sample_indices) + value = compute_node_value( + sum_gradients, sum_hessians, -np.inf, np.inf, l2_regularization + ) + split_info = splitter.find_node_split( + n_samples, histograms, sum_gradients, sum_hessians, value + ) + assert split_info.gain == -1 + + +@pytest.mark.parametrize( + ( + "X_binned, all_gradients, has_missing_values, n_bins_non_missing, " + " expected_split_on_nan, expected_bin_idx, expected_go_to_left" + ), + [ + # basic sanity check with no missing values: given the gradient + # values, the split must occur on bin_idx=3 + ( + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], # X_binned + [1, 1, 1, 1, 5, 5, 5, 5, 5, 5], # gradients + False, # no missing values + 10, # n_bins_non_missing + False, # don't split on nans + 3, # expected_bin_idx + "not_applicable", + ), + # We replace 2 samples by NaNs (bin_idx=8) + # These 2 samples were mapped to the left node before, so they should + # be mapped to left node again + # Notice how the bin_idx threshold changes from 3 to 1. 
+ ( + [8, 0, 1, 8, 2, 3, 4, 5, 6, 7], # 8 <=> missing + [1, 1, 1, 1, 5, 5, 5, 5, 5, 5], + True, # missing values + 8, # n_bins_non_missing + False, # don't split on nans + 1, # cut on bin_idx=1 + True, + ), # missing values go to left + # same as above, but with non-consecutive missing_values_bin + ( + [9, 0, 1, 9, 2, 3, 4, 5, 6, 7], # 9 <=> missing + [1, 1, 1, 1, 5, 5, 5, 5, 5, 5], + True, # missing values + 8, # n_bins_non_missing + False, # don't split on nans + 1, # cut on bin_idx=1 + True, + ), # missing values go to left + # this time replacing 2 samples that were on the right. + ( + [0, 1, 2, 3, 8, 4, 8, 5, 6, 7], # 8 <=> missing + [1, 1, 1, 1, 5, 5, 5, 5, 5, 5], + True, # missing values + 8, # n_bins_non_missing + False, # don't split on nans + 3, # cut on bin_idx=3 (like in first case) + False, + ), # missing values go to right + # same as above, but with non-consecutive missing_values_bin + ( + [0, 1, 2, 3, 9, 4, 9, 5, 6, 7], # 9 <=> missing + [1, 1, 1, 1, 5, 5, 5, 5, 5, 5], + True, # missing values + 8, # n_bins_non_missing + False, # don't split on nans + 3, # cut on bin_idx=3 (like in first case) + False, + ), # missing values go to right + # For the following cases, split_on_nans is True (we replace all of + # the samples with nans, instead of just 2). + ( + [0, 1, 2, 3, 4, 4, 4, 4, 4, 4], # 4 <=> missing + [1, 1, 1, 1, 5, 5, 5, 5, 5, 5], + True, # missing values + 4, # n_bins_non_missing + True, # split on nans + 3, # cut on bin_idx=3 + False, + ), # missing values go to right + # same as above, but with non-consecutive missing_values_bin + ( + [0, 1, 2, 3, 9, 9, 9, 9, 9, 9], # 9 <=> missing + [1, 1, 1, 1, 1, 1, 5, 5, 5, 5], + True, # missing values + 4, # n_bins_non_missing + True, # split on nans + 3, # cut on bin_idx=3 + False, + ), # missing values go to right + ( + [6, 6, 6, 6, 0, 1, 2, 3, 4, 5], # 6 <=> missing + [1, 1, 1, 1, 5, 5, 5, 5, 5, 5], + True, # missing values + 6, # n_bins_non_missing + True, # split on nans + 5, # cut on bin_idx=5 + False, + ), # missing values go to right + # same as above, but with non-consecutive missing_values_bin + ( + [9, 9, 9, 9, 0, 1, 2, 3, 4, 5], # 9 <=> missing + [1, 1, 1, 1, 5, 5, 5, 5, 5, 5], + True, # missing values + 6, # n_bins_non_missing + True, # split on nans + 5, # cut on bin_idx=5 + False, + ), # missing values go to right + ], +) +def test_splitting_missing_values( + X_binned, + all_gradients, + has_missing_values, + n_bins_non_missing, + expected_split_on_nan, + expected_bin_idx, + expected_go_to_left, +): + # Make sure missing values are properly supported. + # we build an artificial example with gradients such that the best split + # is on bin_idx=3, when there are no missing values. 
+ # Then we introduce missing values and: + # - make sure the chosen bin is correct (find_best_bin()): it's + # still the same split, even though the index of the bin may change + # - make sure the missing values are mapped to the correct child + # (split_indices()) + + n_bins = max(X_binned) + 1 + n_samples = len(X_binned) + l2_regularization = 0.0 + min_hessian_to_split = 1e-3 + min_samples_leaf = 1 + min_gain_to_split = 0.0 + + sample_indices = np.arange(n_samples, dtype=np.uint32) + X_binned = np.array(X_binned, dtype=X_BINNED_DTYPE).reshape(-1, 1) + X_binned = np.asfortranarray(X_binned) + all_gradients = np.array(all_gradients, dtype=G_H_DTYPE) + has_missing_values = np.array([has_missing_values], dtype=np.uint8) + all_hessians = np.ones(1, dtype=G_H_DTYPE) + sum_gradients = all_gradients.sum() + sum_hessians = 1 * n_samples + hessians_are_constant = True + + builder = HistogramBuilder( + X_binned, n_bins, all_gradients, all_hessians, hessians_are_constant, n_threads + ) + + n_bins_non_missing = np.array([n_bins_non_missing], dtype=np.uint32) + monotonic_cst = np.array( + [MonotonicConstraint.NO_CST] * X_binned.shape[1], dtype=np.int8 + ) + is_categorical = np.zeros_like(monotonic_cst, dtype=np.uint8) + missing_values_bin_idx = n_bins - 1 + splitter = Splitter( + X_binned, + n_bins_non_missing, + missing_values_bin_idx, + has_missing_values, + is_categorical, + monotonic_cst, + l2_regularization, + min_hessian_to_split, + min_samples_leaf, + min_gain_to_split, + hessians_are_constant, + ) + + histograms = builder.compute_histograms_brute(sample_indices) + value = compute_node_value( + sum_gradients, sum_hessians, -np.inf, np.inf, l2_regularization + ) + split_info = splitter.find_node_split( + n_samples, histograms, sum_gradients, sum_hessians, value + ) + + assert split_info.bin_idx == expected_bin_idx + if has_missing_values: + assert split_info.missing_go_to_left == expected_go_to_left + + split_on_nan = split_info.bin_idx == n_bins_non_missing[0] - 1 + assert split_on_nan == expected_split_on_nan + + # Make sure the split is properly computed. + # This also make sure missing values are properly assigned to the correct + # child in split_indices() + samples_left, samples_right, _ = splitter.split_indices( + split_info, splitter.partition + ) + + if not expected_split_on_nan: + # When we don't split on nans, the split should always be the same. + assert set(samples_left) == set([0, 1, 2, 3]) + assert set(samples_right) == set([4, 5, 6, 7, 8, 9]) + else: + # When we split on nans, samples with missing values are always mapped + # to the right child. 
+ missing_samples_indices = np.flatnonzero( + np.array(X_binned) == missing_values_bin_idx + ) + non_missing_samples_indices = np.flatnonzero( + np.array(X_binned) != missing_values_bin_idx + ) + + assert set(samples_right) == set(missing_samples_indices) + assert set(samples_left) == set(non_missing_samples_indices) + + +@pytest.mark.parametrize( + "X_binned, has_missing_values, n_bins_non_missing, ", + [ + # one category + ([0] * 20, False, 1), + # all categories appear less than MIN_CAT_SUPPORT (hardcoded to 10) + ([0] * 9 + [1] * 8, False, 2), + # only one category appears more than MIN_CAT_SUPPORT + ([0] * 12 + [1] * 8, False, 2), + # missing values + category appear less than MIN_CAT_SUPPORT + # 9 is missing + ([0] * 9 + [1] * 8 + [9] * 4, True, 2), + # no non-missing category + ([9] * 11, True, 0), + ], +) +def test_splitting_categorical_cat_smooth( + X_binned, has_missing_values, n_bins_non_missing +): + # Checks categorical splits are correct when the MIN_CAT_SUPPORT constraint + # isn't respected: there are no splits + + n_bins = max(X_binned) + 1 + n_samples = len(X_binned) + X_binned = np.array([X_binned], dtype=X_BINNED_DTYPE).T + X_binned = np.asfortranarray(X_binned) + + l2_regularization = 0.0 + min_hessian_to_split = 1e-3 + min_samples_leaf = 1 + min_gain_to_split = 0.0 + + sample_indices = np.arange(n_samples, dtype=np.uint32) + all_gradients = np.ones(n_samples, dtype=G_H_DTYPE) + has_missing_values = np.array([has_missing_values], dtype=np.uint8) + all_hessians = np.ones(1, dtype=G_H_DTYPE) + sum_gradients = all_gradients.sum() + sum_hessians = n_samples + hessians_are_constant = True + + builder = HistogramBuilder( + X_binned, n_bins, all_gradients, all_hessians, hessians_are_constant, n_threads + ) + + n_bins_non_missing = np.array([n_bins_non_missing], dtype=np.uint32) + monotonic_cst = np.array( + [MonotonicConstraint.NO_CST] * X_binned.shape[1], dtype=np.int8 + ) + is_categorical = np.ones_like(monotonic_cst, dtype=np.uint8) + missing_values_bin_idx = n_bins - 1 + + splitter = Splitter( + X_binned, + n_bins_non_missing, + missing_values_bin_idx, + has_missing_values, + is_categorical, + monotonic_cst, + l2_regularization, + min_hessian_to_split, + min_samples_leaf, + min_gain_to_split, + hessians_are_constant, + ) + + histograms = builder.compute_histograms_brute(sample_indices) + value = compute_node_value( + sum_gradients, sum_hessians, -np.inf, np.inf, l2_regularization + ) + split_info = splitter.find_node_split( + n_samples, histograms, sum_gradients, sum_hessians, value + ) + + # no split found + assert split_info.gain == -1 + + +def _assert_categories_equals_bitset(categories, bitset): + # assert that the bitset exactly corresponds to the categories + # bitset is assumed to be an array of 8 uint32 elements + + # form bitset from threshold + expected_bitset = np.zeros(8, dtype=np.uint32) + for cat in categories: + idx = cat // 32 + shift = cat % 32 + expected_bitset[idx] |= 1 << shift + + # check for equality + assert_array_equal(expected_bitset, bitset) + + +@pytest.mark.parametrize( + ( + "X_binned, all_gradients, expected_categories_left, n_bins_non_missing," + "missing_values_bin_idx, has_missing_values, expected_missing_go_to_left" + ), + [ + # 4 categories + ( + [0, 1, 2, 3] * 11, # X_binned + [10, 1, 10, 10] * 11, # all_gradients + [1], # expected_categories_left + 4, # n_bins_non_missing + 4, # missing_values_bin_idx + False, # has_missing_values + None, + ), # expected_missing_go_to_left, unchecked + # Make sure that the categories that are on the 
right (second half) of + # the sorted categories array can still go in the left child. In this + # case, the best split was found when scanning from right to left. + ( + [0, 1, 2, 3] * 11, # X_binned + [10, 10, 10, 1] * 11, # all_gradients + [3], # expected_categories_left + 4, # n_bins_non_missing + 4, # missing_values_bin_idx + False, # has_missing_values + None, + ), # expected_missing_go_to_left, unchecked + # categories that don't respect MIN_CAT_SUPPORT (cat 4) are always + # mapped to the right child + ( + [0, 1, 2, 3] * 11 + [4] * 5, # X_binned + [10, 10, 10, 1] * 11 + [10] * 5, # all_gradients + [3], # expected_categories_left + 4, # n_bins_non_missing + 4, # missing_values_bin_idx + False, # has_missing_values + None, + ), # expected_missing_go_to_left, unchecked + # categories that don't respect MIN_CAT_SUPPORT are always mapped to + # the right child: in this case a more sensible split could have been + # 3, 4 - 0, 1, 2 + # But the split is still 3 - 0, 1, 2, 4. this is because we only scan + # up to the middle of the sorted category array (0, 1, 2, 3), and + # because we exclude cat 4 in this array. + ( + [0, 1, 2, 3] * 11 + [4] * 5, # X_binned + [10, 10, 10, 1] * 11 + [1] * 5, # all_gradients + [3], # expected_categories_left + 4, # n_bins_non_missing + 4, # missing_values_bin_idx + False, # has_missing_values + None, + ), # expected_missing_go_to_left, unchecked + # 4 categories with missing values that go to the right + ( + [0, 1, 2] * 11 + [9] * 11, # X_binned + [10, 1, 10] * 11 + [10] * 11, # all_gradients + [1], # expected_categories_left + 3, # n_bins_non_missing + 9, # missing_values_bin_idx + True, # has_missing_values + False, + ), # expected_missing_go_to_left + # 4 categories with missing values that go to the left + ( + [0, 1, 2] * 11 + [9] * 11, # X_binned + [10, 1, 10] * 11 + [1] * 11, # all_gradients + [1, 9], # expected_categories_left + 3, # n_bins_non_missing + 9, # missing_values_bin_idx + True, # has_missing_values + True, + ), # expected_missing_go_to_left + # split is on the missing value + ( + [0, 1, 2, 3, 4] * 11 + [255] * 12, # X_binned + [10, 10, 10, 10, 10] * 11 + [1] * 12, # all_gradients + [255], # expected_categories_left + 5, # n_bins_non_missing + 255, # missing_values_bin_idx + True, # has_missing_values + True, + ), # expected_missing_go_to_left + # split on even categories + ( + list(range(60)) * 12, # X_binned + [10, 1] * 360, # all_gradients + list(range(1, 60, 2)), # expected_categories_left + 59, # n_bins_non_missing + 59, # missing_values_bin_idx + True, # has_missing_values + True, + ), # expected_missing_go_to_left + # split on every 8 categories + ( + list(range(256)) * 12, # X_binned + [10, 10, 10, 10, 10, 10, 10, 1] * 384, # all_gradients + list(range(7, 256, 8)), # expected_categories_left + 255, # n_bins_non_missing + 255, # missing_values_bin_idx + True, # has_missing_values + True, + ), # expected_missing_go_to_left + ], +) +def test_splitting_categorical_sanity( + X_binned, + all_gradients, + expected_categories_left, + n_bins_non_missing, + missing_values_bin_idx, + has_missing_values, + expected_missing_go_to_left, +): + # Tests various combinations of categorical splits + + n_samples = len(X_binned) + n_bins = max(X_binned) + 1 + + X_binned = np.array(X_binned, dtype=X_BINNED_DTYPE).reshape(-1, 1) + X_binned = np.asfortranarray(X_binned) + + l2_regularization = 0.0 + min_hessian_to_split = 1e-3 + min_samples_leaf = 1 + min_gain_to_split = 0.0 + + sample_indices = np.arange(n_samples, dtype=np.uint32) + all_gradients = 
np.array(all_gradients, dtype=G_H_DTYPE) + all_hessians = np.ones(1, dtype=G_H_DTYPE) + has_missing_values = np.array([has_missing_values], dtype=np.uint8) + sum_gradients = all_gradients.sum() + sum_hessians = n_samples + hessians_are_constant = True + + builder = HistogramBuilder( + X_binned, n_bins, all_gradients, all_hessians, hessians_are_constant, n_threads + ) + + n_bins_non_missing = np.array([n_bins_non_missing], dtype=np.uint32) + monotonic_cst = np.array( + [MonotonicConstraint.NO_CST] * X_binned.shape[1], dtype=np.int8 + ) + is_categorical = np.ones_like(monotonic_cst, dtype=np.uint8) + + splitter = Splitter( + X_binned, + n_bins_non_missing, + missing_values_bin_idx, + has_missing_values, + is_categorical, + monotonic_cst, + l2_regularization, + min_hessian_to_split, + min_samples_leaf, + min_gain_to_split, + hessians_are_constant, + ) + + histograms = builder.compute_histograms_brute(sample_indices) + + value = compute_node_value( + sum_gradients, sum_hessians, -np.inf, np.inf, l2_regularization + ) + split_info = splitter.find_node_split( + n_samples, histograms, sum_gradients, sum_hessians, value + ) + + assert split_info.is_categorical + assert split_info.gain > 0 + _assert_categories_equals_bitset( + expected_categories_left, split_info.left_cat_bitset + ) + if has_missing_values: + assert split_info.missing_go_to_left == expected_missing_go_to_left + # If there is no missing value during training, the flag missing_go_to_left + # is set later in the grower. + + # make sure samples are split correctly + samples_left, samples_right, _ = splitter.split_indices( + split_info, splitter.partition + ) + + left_mask = np.isin(X_binned.ravel(), expected_categories_left) + assert_array_equal(sample_indices[left_mask], samples_left) + assert_array_equal(sample_indices[~left_mask], samples_right) + + +def test_split_interaction_constraints(): + """Check that allowed_features are respected.""" + n_features = 4 + # features 1 and 2 are not allowed to be split on + allowed_features = np.array([0, 3], dtype=np.uint32) + n_bins = 5 + n_samples = 10 + l2_regularization = 0.0 + min_hessian_to_split = 1e-3 + min_samples_leaf = 1 + min_gain_to_split = 0.0 + + sample_indices = np.arange(n_samples, dtype=np.uint32) + all_hessians = np.ones(1, dtype=G_H_DTYPE) + sum_hessians = n_samples + hessians_are_constant = True + + split_features = [] + + # The loop is to ensure that we split at least once on each allowed feature (0, 3). + # This is tracked by split_features and checked at the end. 
+ for i in range(10): + rng = np.random.RandomState(919 + i) + X_binned = np.asfortranarray( + rng.randint(0, n_bins - 1, size=(n_samples, n_features)), + dtype=X_BINNED_DTYPE, + ) + X_binned = np.asfortranarray(X_binned, dtype=X_BINNED_DTYPE) + + # Make feature 1 very important + all_gradients = (10 * X_binned[:, 1] + rng.randn(n_samples)).astype(G_H_DTYPE) + sum_gradients = all_gradients.sum() + + builder = HistogramBuilder( + X_binned, + n_bins, + all_gradients, + all_hessians, + hessians_are_constant, + n_threads, + ) + n_bins_non_missing = np.array([n_bins] * X_binned.shape[1], dtype=np.uint32) + has_missing_values = np.array([False] * X_binned.shape[1], dtype=np.uint8) + monotonic_cst = np.array( + [MonotonicConstraint.NO_CST] * X_binned.shape[1], dtype=np.int8 + ) + is_categorical = np.zeros_like(monotonic_cst, dtype=np.uint8) + missing_values_bin_idx = n_bins - 1 + splitter = Splitter( + X_binned, + n_bins_non_missing, + missing_values_bin_idx, + has_missing_values, + is_categorical, + monotonic_cst, + l2_regularization, + min_hessian_to_split, + min_samples_leaf, + min_gain_to_split, + hessians_are_constant, + ) + + assert np.all(sample_indices == splitter.partition) + + histograms = builder.compute_histograms_brute(sample_indices) + value = compute_node_value( + sum_gradients, sum_hessians, -np.inf, np.inf, l2_regularization + ) + + # with all features allowed, feature 1 should be split on as it is the most + # important one by construction of the gradients + si_root = splitter.find_node_split( + n_samples, + histograms, + sum_gradients, + sum_hessians, + value, + allowed_features=None, + ) + assert si_root.feature_idx == 1 + + # only features 0 and 3 are allowed to be split on + si_root = splitter.find_node_split( + n_samples, + histograms, + sum_gradients, + sum_hessians, + value, + allowed_features=allowed_features, + ) + split_features.append(si_root.feature_idx) + assert si_root.feature_idx in allowed_features + + # make sure feature 0 and feature 3 are split on in the constraint setting + assert set(allowed_features) == set(split_features) + + +@pytest.mark.parametrize("forbidden_features", [set(), {1, 3}]) +def test_split_feature_fraction_per_split(forbidden_features): + """Check that feature_fraction_per_split is respected. + + Because we set `n_features = 4` and `feature_fraction_per_split = 0.25`, it means + that calling `splitter.find_node_split` will be allowed to select a split for a + single completely random feature at each call. So if we iterate enough, we should + cover all the allowed features, irrespective of the values of the gradients and + Hessians of the objective. 
+ """ + n_features = 4 + allowed_features = np.array( + list(set(range(n_features)) - forbidden_features), dtype=np.uint32 + ) + n_bins = 5 + n_samples = 40 + l2_regularization = 0.0 + min_hessian_to_split = 1e-3 + min_samples_leaf = 1 + min_gain_to_split = 0.0 + rng = np.random.default_rng(42) + + sample_indices = np.arange(n_samples, dtype=np.uint32) + all_gradients = rng.uniform(low=0.5, high=1, size=n_samples).astype(G_H_DTYPE) + sum_gradients = all_gradients.sum() + all_hessians = np.ones(1, dtype=G_H_DTYPE) + sum_hessians = n_samples + hessians_are_constant = True + + X_binned = np.asfortranarray( + rng.integers(low=0, high=n_bins - 1, size=(n_samples, n_features)), + dtype=X_BINNED_DTYPE, + ) + X_binned = np.asfortranarray(X_binned, dtype=X_BINNED_DTYPE) + builder = HistogramBuilder( + X_binned, + n_bins, + all_gradients, + all_hessians, + hessians_are_constant, + n_threads, + ) + histograms = builder.compute_histograms_brute(sample_indices) + value = compute_node_value( + sum_gradients, sum_hessians, -np.inf, np.inf, l2_regularization + ) + n_bins_non_missing = np.array([n_bins] * X_binned.shape[1], dtype=np.uint32) + has_missing_values = np.array([False] * X_binned.shape[1], dtype=np.uint8) + monotonic_cst = np.array( + [MonotonicConstraint.NO_CST] * X_binned.shape[1], dtype=np.int8 + ) + is_categorical = np.zeros_like(monotonic_cst, dtype=np.uint8) + missing_values_bin_idx = n_bins - 1 + + params = dict( + X_binned=X_binned, + n_bins_non_missing=n_bins_non_missing, + missing_values_bin_idx=missing_values_bin_idx, + has_missing_values=has_missing_values, + is_categorical=is_categorical, + monotonic_cst=monotonic_cst, + l2_regularization=l2_regularization, + min_hessian_to_split=min_hessian_to_split, + min_samples_leaf=min_samples_leaf, + min_gain_to_split=min_gain_to_split, + hessians_are_constant=hessians_are_constant, + rng=rng, + ) + splitter_subsample = Splitter( + feature_fraction_per_split=0.25, # THIS is the important setting here. + **params, + ) + splitter_all_features = Splitter(feature_fraction_per_split=1.0, **params) + + assert np.all(sample_indices == splitter_subsample.partition) + + split_features_subsample = [] + split_features_all = [] + # The loop is to ensure that we split at least once on each feature. + # This is tracked by split_features and checked at the end. + for i in range(20): + si_root = splitter_subsample.find_node_split( + n_samples, + histograms, + sum_gradients, + sum_hessians, + value, + allowed_features=allowed_features, + ) + split_features_subsample.append(si_root.feature_idx) + + # This second splitter is our "counterfactual". + si_root = splitter_all_features.find_node_split( + n_samples, + histograms, + sum_gradients, + sum_hessians, + value, + allowed_features=allowed_features, + ) + split_features_all.append(si_root.feature_idx) + + # Make sure all features are split on. + assert set(split_features_subsample) == set(allowed_features) + + # Make sure, our counterfactual always splits on same feature. 
+ assert len(set(split_features_all)) == 1 diff --git a/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/test_warm_start.py b/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/test_warm_start.py new file mode 100644 index 0000000000000000000000000000000000000000..03a2720b36127108e06537a3f4a85c5b9d4e7701 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/test_warm_start.py @@ -0,0 +1,231 @@ +import numpy as np +import pytest +from numpy.testing import assert_allclose, assert_array_equal + +from sklearn.base import clone +from sklearn.datasets import make_classification, make_regression +from sklearn.ensemble import ( + HistGradientBoostingClassifier, + HistGradientBoostingRegressor, +) +from sklearn.metrics import check_scoring + +X_classification, y_classification = make_classification(random_state=0) +X_regression, y_regression = make_regression(random_state=0) + + +def _assert_predictor_equal(gb_1, gb_2, X): + """Assert that two HistGBM instances are identical.""" + # Check identical nodes for each tree + for pred_ith_1, pred_ith_2 in zip(gb_1._predictors, gb_2._predictors): + for predictor_1, predictor_2 in zip(pred_ith_1, pred_ith_2): + assert_array_equal(predictor_1.nodes, predictor_2.nodes) + + # Check identical predictions + assert_allclose(gb_1.predict(X), gb_2.predict(X)) + + +@pytest.mark.parametrize( + "GradientBoosting, X, y", + [ + (HistGradientBoostingClassifier, X_classification, y_classification), + (HistGradientBoostingRegressor, X_regression, y_regression), + ], +) +def test_max_iter_with_warm_start_validation(GradientBoosting, X, y): + # Check that a ValueError is raised when the maximum number of iterations + # is smaller than the number of iterations from the previous fit when warm + # start is True. + + estimator = GradientBoosting(max_iter=10, early_stopping=False, warm_start=True) + estimator.fit(X, y) + estimator.set_params(max_iter=5) + err_msg = ( + "max_iter=5 must be larger than or equal to n_iter_=10 when warm_start==True" + ) + with pytest.raises(ValueError, match=err_msg): + estimator.fit(X, y) + + +@pytest.mark.parametrize( + "GradientBoosting, X, y", + [ + (HistGradientBoostingClassifier, X_classification, y_classification), + (HistGradientBoostingRegressor, X_regression, y_regression), + ], +) +def test_warm_start_yields_identical_results(GradientBoosting, X, y): + # Make sure that fitting 50 iterations and then 25 with warm start is + # equivalent to fitting 75 iterations. + + rng = 42 + gb_warm_start = GradientBoosting( + n_iter_no_change=100, max_iter=50, random_state=rng, warm_start=True + ) + gb_warm_start.fit(X, y).set_params(max_iter=75).fit(X, y) + + gb_no_warm_start = GradientBoosting( + n_iter_no_change=100, max_iter=75, random_state=rng, warm_start=False + ) + gb_no_warm_start.fit(X, y) + + # Check that both predictors are equal + _assert_predictor_equal(gb_warm_start, gb_no_warm_start, X) + + +@pytest.mark.parametrize( + "GradientBoosting, X, y", + [ + (HistGradientBoostingClassifier, X_classification, y_classification), + (HistGradientBoostingRegressor, X_regression, y_regression), + ], +) +def test_warm_start_max_depth(GradientBoosting, X, y): + # Test if possible to fit trees of different depth in ensemble. 
+ gb = GradientBoosting( + max_iter=20, + min_samples_leaf=1, + warm_start=True, + max_depth=2, + early_stopping=False, + ) + gb.fit(X, y) + gb.set_params(max_iter=30, max_depth=3, n_iter_no_change=110) + gb.fit(X, y) + + # First 20 trees have max_depth == 2 + for i in range(20): + assert gb._predictors[i][0].get_max_depth() == 2 + # Last 10 trees have max_depth == 3 + for i in range(1, 11): + assert gb._predictors[-i][0].get_max_depth() == 3 + + +@pytest.mark.parametrize( + "GradientBoosting, X, y", + [ + (HistGradientBoostingClassifier, X_classification, y_classification), + (HistGradientBoostingRegressor, X_regression, y_regression), + ], +) +@pytest.mark.parametrize("scoring", (None, "loss")) +def test_warm_start_early_stopping(GradientBoosting, X, y, scoring): + # Make sure that early stopping occurs after a small number of iterations + # when fitting a second time with warm starting. + + n_iter_no_change = 5 + gb = GradientBoosting( + n_iter_no_change=n_iter_no_change, + max_iter=10000, + early_stopping=True, + random_state=42, + warm_start=True, + tol=1e-3, + scoring=scoring, + ) + gb.fit(X, y) + n_iter_first_fit = gb.n_iter_ + gb.fit(X, y) + n_iter_second_fit = gb.n_iter_ + assert 0 < n_iter_second_fit - n_iter_first_fit < n_iter_no_change + + +@pytest.mark.parametrize( + "GradientBoosting, X, y", + [ + (HistGradientBoostingClassifier, X_classification, y_classification), + (HistGradientBoostingRegressor, X_regression, y_regression), + ], +) +def test_warm_start_equal_n_estimators(GradientBoosting, X, y): + # Test if warm start with equal n_estimators does nothing + gb_1 = GradientBoosting(max_depth=2, early_stopping=False) + gb_1.fit(X, y) + + gb_2 = clone(gb_1) + gb_2.set_params(max_iter=gb_1.max_iter, warm_start=True, n_iter_no_change=5) + gb_2.fit(X, y) + + # Check that both predictors are equal + _assert_predictor_equal(gb_1, gb_2, X) + + +@pytest.mark.parametrize( + "GradientBoosting, X, y", + [ + (HistGradientBoostingClassifier, X_classification, y_classification), + (HistGradientBoostingRegressor, X_regression, y_regression), + ], +) +def test_warm_start_clear(GradientBoosting, X, y): + # Test if fit clears state. + gb_1 = GradientBoosting(n_iter_no_change=5, random_state=42) + gb_1.fit(X, y) + + gb_2 = GradientBoosting(n_iter_no_change=5, random_state=42, warm_start=True) + gb_2.fit(X, y) # inits state + gb_2.set_params(warm_start=False) + gb_2.fit(X, y) # clears old state and equals est + + # Check that both predictors have the same train_score_ and + # validation_score_ attributes + assert_allclose(gb_1.train_score_, gb_2.train_score_) + assert_allclose(gb_1.validation_score_, gb_2.validation_score_) + + # Check that both predictors are equal + _assert_predictor_equal(gb_1, gb_2, X) + + +@pytest.mark.parametrize( + "GradientBoosting, X, y", + [ + (HistGradientBoostingClassifier, X_classification, y_classification), + (HistGradientBoostingRegressor, X_regression, y_regression), + ], +) +@pytest.mark.parametrize("rng_type", ("none", "int", "instance")) +def test_random_seeds_warm_start(GradientBoosting, X, y, rng_type): + # Make sure the seeds for train/val split and small trainset subsampling + # are correctly set in a warm start context. 
+ def _get_rng(rng_type): + # Helper to avoid consuming rngs + if rng_type == "none": + return None + elif rng_type == "int": + return 42 + else: + return np.random.RandomState(0) + + random_state = _get_rng(rng_type) + gb_1 = GradientBoosting(early_stopping=True, max_iter=2, random_state=random_state) + gb_1.set_params(scoring=check_scoring(gb_1)) + gb_1.fit(X, y) + random_seed_1_1 = gb_1._random_seed + + gb_1.fit(X, y) + random_seed_1_2 = gb_1._random_seed # clear the old state, different seed + + random_state = _get_rng(rng_type) + gb_2 = GradientBoosting( + early_stopping=True, max_iter=2, random_state=random_state, warm_start=True + ) + gb_2.set_params(scoring=check_scoring(gb_2)) + gb_2.fit(X, y) # inits state + random_seed_2_1 = gb_2._random_seed + gb_2.fit(X, y) # clears old state and equals est + random_seed_2_2 = gb_2._random_seed + + # Without warm starting, the seeds should be + # * all different if random state is None + # * all equal if random state is an integer + # * different when refitting and equal with a new estimator (because + # the random state is mutated) + if rng_type == "none": + assert random_seed_1_1 != random_seed_1_2 != random_seed_2_1 + elif rng_type == "int": + assert random_seed_1_1 == random_seed_1_2 == random_seed_2_1 + else: + assert random_seed_1_1 == random_seed_2_1 != random_seed_1_2 + + # With warm starting, the seeds must be equal + assert random_seed_2_1 == random_seed_2_2 diff --git a/venv/lib/python3.10/site-packages/sklearn/ensemble/tests/__init__.py b/venv/lib/python3.10/site-packages/sklearn/ensemble/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7ae9f572d7833adbb1974670ea52e0911e257301 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_bagging.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_bagging.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7b22f59724b2de32f86036b058913cb4ef69b555 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_bagging.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_base.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..20224c0a39446e1dd8d1318e0876d163645ca822 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_base.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_common.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1831e280fe7e5d1648002a6a2ee8149831620595 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_common.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_forest.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_forest.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e96b804facf21730f9cb40617c04270c68fdc64a Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_forest.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_gradient_boosting.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_gradient_boosting.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..58bea6c1878d57613ad693b28ccaf98d33ed000b Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_gradient_boosting.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_iforest.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_iforest.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f563326aa4564d62919306801d1d0e26beeea9e0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_iforest.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_stacking.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_stacking.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..76cf39f0c4af980caf0dcef0388c30785cfa643d Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_stacking.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_voting.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_voting.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..437135fc15be7f0f3474d94657e19762b411e532 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_voting.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_weight_boosting.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_weight_boosting.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ca43a9cdeca51f2f9cc4a8d2f54ed6b059ed74b2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_weight_boosting.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/ensemble/tests/test_bagging.py b/venv/lib/python3.10/site-packages/sklearn/ensemble/tests/test_bagging.py new file mode 100644 index 0000000000000000000000000000000000000000..2c1e308cee33bddb623dfac37d678e70cc27e20d --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/ensemble/tests/test_bagging.py @@ -0,0 +1,938 @@ +""" +Testing for the bagging ensemble module (sklearn.ensemble.bagging). 
+""" + +# Author: Gilles Louppe +# License: BSD 3 clause +from itertools import cycle, product + +import joblib +import numpy as np +import pytest + +from sklearn.base import BaseEstimator +from sklearn.datasets import load_diabetes, load_iris, make_hastie_10_2 +from sklearn.dummy import DummyClassifier, DummyRegressor +from sklearn.ensemble import ( + BaggingClassifier, + BaggingRegressor, + HistGradientBoostingClassifier, + HistGradientBoostingRegressor, +) +from sklearn.feature_selection import SelectKBest +from sklearn.linear_model import LogisticRegression, Perceptron +from sklearn.model_selection import GridSearchCV, ParameterGrid, train_test_split +from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor +from sklearn.pipeline import make_pipeline +from sklearn.preprocessing import FunctionTransformer, scale +from sklearn.random_projection import SparseRandomProjection +from sklearn.svm import SVC, SVR +from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor +from sklearn.utils import check_random_state +from sklearn.utils._testing import assert_array_almost_equal, assert_array_equal +from sklearn.utils.fixes import CSC_CONTAINERS, CSR_CONTAINERS + +rng = check_random_state(0) + +# also load the iris dataset +# and randomly permute it +iris = load_iris() +perm = rng.permutation(iris.target.size) +iris.data = iris.data[perm] +iris.target = iris.target[perm] + +# also load the diabetes dataset +# and randomly permute it +diabetes = load_diabetes() +perm = rng.permutation(diabetes.target.size) +diabetes.data = diabetes.data[perm] +diabetes.target = diabetes.target[perm] + + +def test_classification(): + # Check classification for various parameter settings. + rng = check_random_state(0) + X_train, X_test, y_train, y_test = train_test_split( + iris.data, iris.target, random_state=rng + ) + grid = ParameterGrid( + { + "max_samples": [0.5, 1.0], + "max_features": [1, 4], + "bootstrap": [True, False], + "bootstrap_features": [True, False], + } + ) + estimators = [ + None, + DummyClassifier(), + Perceptron(max_iter=20), + DecisionTreeClassifier(max_depth=2), + KNeighborsClassifier(), + SVC(), + ] + # Try different parameter settings with different base classifiers without + # doing the full cartesian product to keep the test durations low. + for params, estimator in zip(grid, cycle(estimators)): + BaggingClassifier( + estimator=estimator, + random_state=rng, + n_estimators=2, + **params, + ).fit(X_train, y_train).predict(X_test) + + +@pytest.mark.parametrize( + "sparse_container, params, method", + product( + CSR_CONTAINERS + CSC_CONTAINERS, + [ + { + "max_samples": 0.5, + "max_features": 2, + "bootstrap": True, + "bootstrap_features": True, + }, + { + "max_samples": 1.0, + "max_features": 4, + "bootstrap": True, + "bootstrap_features": True, + }, + {"max_features": 2, "bootstrap": False, "bootstrap_features": True}, + {"max_samples": 0.5, "bootstrap": True, "bootstrap_features": False}, + ], + ["predict", "predict_proba", "predict_log_proba", "decision_function"], + ), +) +def test_sparse_classification(sparse_container, params, method): + # Check classification for various parameter settings on sparse input. 
+ + class CustomSVC(SVC): + """SVC variant that records the nature of the training set""" + + def fit(self, X, y): + super().fit(X, y) + self.data_type_ = type(X) + return self + + rng = check_random_state(0) + X_train, X_test, y_train, y_test = train_test_split( + scale(iris.data), iris.target, random_state=rng + ) + + X_train_sparse = sparse_container(X_train) + X_test_sparse = sparse_container(X_test) + # Trained on sparse format + sparse_classifier = BaggingClassifier( + estimator=CustomSVC(kernel="linear", decision_function_shape="ovr"), + random_state=1, + **params, + ).fit(X_train_sparse, y_train) + sparse_results = getattr(sparse_classifier, method)(X_test_sparse) + + # Trained on dense format + dense_classifier = BaggingClassifier( + estimator=CustomSVC(kernel="linear", decision_function_shape="ovr"), + random_state=1, + **params, + ).fit(X_train, y_train) + dense_results = getattr(dense_classifier, method)(X_test) + assert_array_almost_equal(sparse_results, dense_results) + + sparse_type = type(X_train_sparse) + types = [i.data_type_ for i in sparse_classifier.estimators_] + + assert all([t == sparse_type for t in types]) + + +def test_regression(): + # Check regression for various parameter settings. + rng = check_random_state(0) + X_train, X_test, y_train, y_test = train_test_split( + diabetes.data[:50], diabetes.target[:50], random_state=rng + ) + grid = ParameterGrid( + { + "max_samples": [0.5, 1.0], + "max_features": [0.5, 1.0], + "bootstrap": [True, False], + "bootstrap_features": [True, False], + } + ) + + for estimator in [ + None, + DummyRegressor(), + DecisionTreeRegressor(), + KNeighborsRegressor(), + SVR(), + ]: + for params in grid: + BaggingRegressor(estimator=estimator, random_state=rng, **params).fit( + X_train, y_train + ).predict(X_test) + + +@pytest.mark.parametrize("sparse_container", CSR_CONTAINERS + CSC_CONTAINERS) +def test_sparse_regression(sparse_container): + # Check regression for various parameter settings on sparse input. 
+ rng = check_random_state(0) + X_train, X_test, y_train, y_test = train_test_split( + diabetes.data[:50], diabetes.target[:50], random_state=rng + ) + + class CustomSVR(SVR): + """SVR variant that records the nature of the training set""" + + def fit(self, X, y): + super().fit(X, y) + self.data_type_ = type(X) + return self + + parameter_sets = [ + { + "max_samples": 0.5, + "max_features": 2, + "bootstrap": True, + "bootstrap_features": True, + }, + { + "max_samples": 1.0, + "max_features": 4, + "bootstrap": True, + "bootstrap_features": True, + }, + {"max_features": 2, "bootstrap": False, "bootstrap_features": True}, + {"max_samples": 0.5, "bootstrap": True, "bootstrap_features": False}, + ] + + X_train_sparse = sparse_container(X_train) + X_test_sparse = sparse_container(X_test) + for params in parameter_sets: + # Trained on sparse format + sparse_classifier = BaggingRegressor( + estimator=CustomSVR(), random_state=1, **params + ).fit(X_train_sparse, y_train) + sparse_results = sparse_classifier.predict(X_test_sparse) + + # Trained on dense format + dense_results = ( + BaggingRegressor(estimator=CustomSVR(), random_state=1, **params) + .fit(X_train, y_train) + .predict(X_test) + ) + + sparse_type = type(X_train_sparse) + types = [i.data_type_ for i in sparse_classifier.estimators_] + + assert_array_almost_equal(sparse_results, dense_results) + assert all([t == sparse_type for t in types]) + + +class DummySizeEstimator(BaseEstimator): + def fit(self, X, y): + self.training_size_ = X.shape[0] + self.training_hash_ = joblib.hash(X) + + def predict(self, X): + return np.ones(X.shape[0]) + + +def test_bootstrap_samples(): + # Test that bootstrapping samples generate non-perfect base estimators. + rng = check_random_state(0) + X_train, X_test, y_train, y_test = train_test_split( + diabetes.data, diabetes.target, random_state=rng + ) + + estimator = DecisionTreeRegressor().fit(X_train, y_train) + + # without bootstrap, all trees are perfect on the training set + ensemble = BaggingRegressor( + estimator=DecisionTreeRegressor(), + max_samples=1.0, + bootstrap=False, + random_state=rng, + ).fit(X_train, y_train) + + assert estimator.score(X_train, y_train) == ensemble.score(X_train, y_train) + + # with bootstrap, trees are no longer perfect on the training set + ensemble = BaggingRegressor( + estimator=DecisionTreeRegressor(), + max_samples=1.0, + bootstrap=True, + random_state=rng, + ).fit(X_train, y_train) + + assert estimator.score(X_train, y_train) > ensemble.score(X_train, y_train) + + # check that each sampling corresponds to a complete bootstrap resample. + # the size of each bootstrap should be the same as the input data but + # the data should be different (checked using the hash of the data). + ensemble = BaggingRegressor(estimator=DummySizeEstimator(), bootstrap=True).fit( + X_train, y_train + ) + training_hash = [] + for estimator in ensemble.estimators_: + assert estimator.training_size_ == X_train.shape[0] + training_hash.append(estimator.training_hash_) + assert len(set(training_hash)) == len(training_hash) + + +def test_bootstrap_features(): + # Test that bootstrapping features may generate duplicate features.
+ rng = check_random_state(0) + X_train, X_test, y_train, y_test = train_test_split( + diabetes.data, diabetes.target, random_state=rng + ) + + ensemble = BaggingRegressor( + estimator=DecisionTreeRegressor(), + max_features=1.0, + bootstrap_features=False, + random_state=rng, + ).fit(X_train, y_train) + + for features in ensemble.estimators_features_: + assert diabetes.data.shape[1] == np.unique(features).shape[0] + + ensemble = BaggingRegressor( + estimator=DecisionTreeRegressor(), + max_features=1.0, + bootstrap_features=True, + random_state=rng, + ).fit(X_train, y_train) + + for features in ensemble.estimators_features_: + assert diabetes.data.shape[1] > np.unique(features).shape[0] + + +def test_probability(): + # Predict probabilities. + rng = check_random_state(0) + X_train, X_test, y_train, y_test = train_test_split( + iris.data, iris.target, random_state=rng + ) + + with np.errstate(divide="ignore", invalid="ignore"): + # Normal case + ensemble = BaggingClassifier( + estimator=DecisionTreeClassifier(), random_state=rng + ).fit(X_train, y_train) + + assert_array_almost_equal( + np.sum(ensemble.predict_proba(X_test), axis=1), np.ones(len(X_test)) + ) + + assert_array_almost_equal( + ensemble.predict_proba(X_test), np.exp(ensemble.predict_log_proba(X_test)) + ) + + # Degenerate case, where some classes are missing + ensemble = BaggingClassifier( + estimator=LogisticRegression(), random_state=rng, max_samples=5 + ).fit(X_train, y_train) + + assert_array_almost_equal( + np.sum(ensemble.predict_proba(X_test), axis=1), np.ones(len(X_test)) + ) + + assert_array_almost_equal( + ensemble.predict_proba(X_test), np.exp(ensemble.predict_log_proba(X_test)) + ) + + +def test_oob_score_classification(): + # Check that oob prediction is a good estimation of the generalization + # error. + rng = check_random_state(0) + X_train, X_test, y_train, y_test = train_test_split( + iris.data, iris.target, random_state=rng + ) + + for estimator in [DecisionTreeClassifier(), SVC()]: + clf = BaggingClassifier( + estimator=estimator, + n_estimators=100, + bootstrap=True, + oob_score=True, + random_state=rng, + ).fit(X_train, y_train) + + test_score = clf.score(X_test, y_test) + + assert abs(test_score - clf.oob_score_) < 0.1 + + # Test with few estimators + warn_msg = ( + "Some inputs do not have OOB scores. This probably means too few " + "estimators were used to compute any reliable oob estimates." + ) + with pytest.warns(UserWarning, match=warn_msg): + clf = BaggingClassifier( + estimator=estimator, + n_estimators=1, + bootstrap=True, + oob_score=True, + random_state=rng, + ) + clf.fit(X_train, y_train) + + +def test_oob_score_regression(): + # Check that oob prediction is a good estimation of the generalization + # error. + rng = check_random_state(0) + X_train, X_test, y_train, y_test = train_test_split( + diabetes.data, diabetes.target, random_state=rng + ) + + clf = BaggingRegressor( + estimator=DecisionTreeRegressor(), + n_estimators=50, + bootstrap=True, + oob_score=True, + random_state=rng, + ).fit(X_train, y_train) + + test_score = clf.score(X_test, y_test) + + assert abs(test_score - clf.oob_score_) < 0.1 + + # Test with few estimators + warn_msg = ( + "Some inputs do not have OOB scores. This probably means too few " + "estimators were used to compute any reliable oob estimates." 
+ ) + with pytest.warns(UserWarning, match=warn_msg): + regr = BaggingRegressor( + estimator=DecisionTreeRegressor(), + n_estimators=1, + bootstrap=True, + oob_score=True, + random_state=rng, + ) + regr.fit(X_train, y_train) + + +def test_single_estimator(): + # Check singleton ensembles. + rng = check_random_state(0) + X_train, X_test, y_train, y_test = train_test_split( + diabetes.data, diabetes.target, random_state=rng + ) + + clf1 = BaggingRegressor( + estimator=KNeighborsRegressor(), + n_estimators=1, + bootstrap=False, + bootstrap_features=False, + random_state=rng, + ).fit(X_train, y_train) + + clf2 = KNeighborsRegressor().fit(X_train, y_train) + + assert_array_almost_equal(clf1.predict(X_test), clf2.predict(X_test)) + + +def test_error(): + # Test support of decision_function + X, y = iris.data, iris.target + base = DecisionTreeClassifier() + assert not hasattr(BaggingClassifier(base).fit(X, y), "decision_function") + + +def test_parallel_classification(): + # Check parallel classification. + X_train, X_test, y_train, y_test = train_test_split( + iris.data, iris.target, random_state=0 + ) + + ensemble = BaggingClassifier( + DecisionTreeClassifier(), n_jobs=3, random_state=0 + ).fit(X_train, y_train) + + # predict_proba + y1 = ensemble.predict_proba(X_test) + ensemble.set_params(n_jobs=1) + y2 = ensemble.predict_proba(X_test) + assert_array_almost_equal(y1, y2) + + ensemble = BaggingClassifier( + DecisionTreeClassifier(), n_jobs=1, random_state=0 + ).fit(X_train, y_train) + + y3 = ensemble.predict_proba(X_test) + assert_array_almost_equal(y1, y3) + + # decision_function + ensemble = BaggingClassifier( + SVC(decision_function_shape="ovr"), n_jobs=3, random_state=0 + ).fit(X_train, y_train) + + decisions1 = ensemble.decision_function(X_test) + ensemble.set_params(n_jobs=1) + decisions2 = ensemble.decision_function(X_test) + assert_array_almost_equal(decisions1, decisions2) + + ensemble = BaggingClassifier( + SVC(decision_function_shape="ovr"), n_jobs=1, random_state=0 + ).fit(X_train, y_train) + + decisions3 = ensemble.decision_function(X_test) + assert_array_almost_equal(decisions1, decisions3) + + +def test_parallel_regression(): + # Check parallel regression. + rng = check_random_state(0) + + X_train, X_test, y_train, y_test = train_test_split( + diabetes.data, diabetes.target, random_state=rng + ) + + ensemble = BaggingRegressor(DecisionTreeRegressor(), n_jobs=3, random_state=0).fit( + X_train, y_train + ) + + ensemble.set_params(n_jobs=1) + y1 = ensemble.predict(X_test) + ensemble.set_params(n_jobs=2) + y2 = ensemble.predict(X_test) + assert_array_almost_equal(y1, y2) + + ensemble = BaggingRegressor(DecisionTreeRegressor(), n_jobs=1, random_state=0).fit( + X_train, y_train + ) + + y3 = ensemble.predict(X_test) + assert_array_almost_equal(y1, y3) + + +def test_gridsearch(): + # Check that bagging ensembles can be grid-searched. + # Transform iris into a binary classification task + X, y = iris.data, iris.target + y[y == 2] = 1 + + # Grid search with scoring based on decision_function + parameters = {"n_estimators": (1, 2), "estimator__C": (1, 2)} + + GridSearchCV(BaggingClassifier(SVC()), parameters, scoring="roc_auc").fit(X, y) + + +def test_estimator(): + # Check estimator and its default values. 
+ rng = check_random_state(0) + + # Classification + X_train, X_test, y_train, y_test = train_test_split( + iris.data, iris.target, random_state=rng + ) + + ensemble = BaggingClassifier(None, n_jobs=3, random_state=0).fit(X_train, y_train) + + assert isinstance(ensemble.estimator_, DecisionTreeClassifier) + + ensemble = BaggingClassifier( + DecisionTreeClassifier(), n_jobs=3, random_state=0 + ).fit(X_train, y_train) + + assert isinstance(ensemble.estimator_, DecisionTreeClassifier) + + ensemble = BaggingClassifier(Perceptron(), n_jobs=3, random_state=0).fit( + X_train, y_train + ) + + assert isinstance(ensemble.estimator_, Perceptron) + + # Regression + X_train, X_test, y_train, y_test = train_test_split( + diabetes.data, diabetes.target, random_state=rng + ) + + ensemble = BaggingRegressor(None, n_jobs=3, random_state=0).fit(X_train, y_train) + + assert isinstance(ensemble.estimator_, DecisionTreeRegressor) + + ensemble = BaggingRegressor(DecisionTreeRegressor(), n_jobs=3, random_state=0).fit( + X_train, y_train + ) + + assert isinstance(ensemble.estimator_, DecisionTreeRegressor) + + ensemble = BaggingRegressor(SVR(), n_jobs=3, random_state=0).fit(X_train, y_train) + assert isinstance(ensemble.estimator_, SVR) + + +def test_bagging_with_pipeline(): + estimator = BaggingClassifier( + make_pipeline(SelectKBest(k=1), DecisionTreeClassifier()), max_features=2 + ) + estimator.fit(iris.data, iris.target) + assert isinstance(estimator[0].steps[-1][1].random_state, int) + + +class DummyZeroEstimator(BaseEstimator): + def fit(self, X, y): + self.classes_ = np.unique(y) + return self + + def predict(self, X): + return self.classes_[np.zeros(X.shape[0], dtype=int)] + + +def test_bagging_sample_weight_unsupported_but_passed(): + estimator = BaggingClassifier(DummyZeroEstimator()) + rng = check_random_state(0) + + estimator.fit(iris.data, iris.target).predict(iris.data) + with pytest.raises(ValueError): + estimator.fit( + iris.data, + iris.target, + sample_weight=rng.randint(10, size=(iris.data.shape[0])), + ) + + +def test_warm_start(random_state=42): + # Test if fitting incrementally with warm start gives a forest of the + # right size and the same results as a normal fit. + X, y = make_hastie_10_2(n_samples=20, random_state=1) + + clf_ws = None + for n_estimators in [5, 10]: + if clf_ws is None: + clf_ws = BaggingClassifier( + n_estimators=n_estimators, random_state=random_state, warm_start=True + ) + else: + clf_ws.set_params(n_estimators=n_estimators) + clf_ws.fit(X, y) + assert len(clf_ws) == n_estimators + + clf_no_ws = BaggingClassifier( + n_estimators=10, random_state=random_state, warm_start=False + ) + clf_no_ws.fit(X, y) + + assert set([tree.random_state for tree in clf_ws]) == set( + [tree.random_state for tree in clf_no_ws] + ) + + +def test_warm_start_smaller_n_estimators(): + # Test if warm start'ed second fit with smaller n_estimators raises error. 
+ X, y = make_hastie_10_2(n_samples=20, random_state=1) + clf = BaggingClassifier(n_estimators=5, warm_start=True) + clf.fit(X, y) + clf.set_params(n_estimators=4) + with pytest.raises(ValueError): + clf.fit(X, y) + + +def test_warm_start_equal_n_estimators(): + # Test that nothing happens when fitting without increasing n_estimators + X, y = make_hastie_10_2(n_samples=20, random_state=1) + X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=43) + + clf = BaggingClassifier(n_estimators=5, warm_start=True, random_state=83) + clf.fit(X_train, y_train) + + y_pred = clf.predict(X_test) + # modify X to nonsense values, this should not change anything + X_train += 1.0 + + warn_msg = "Warm-start fitting without increasing n_estimators does not" + with pytest.warns(UserWarning, match=warn_msg): + clf.fit(X_train, y_train) + assert_array_equal(y_pred, clf.predict(X_test)) + + +def test_warm_start_equivalence(): + # warm started classifier with 5+5 estimators should be equivalent to + # one classifier with 10 estimators + X, y = make_hastie_10_2(n_samples=20, random_state=1) + X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=43) + + clf_ws = BaggingClassifier(n_estimators=5, warm_start=True, random_state=3141) + clf_ws.fit(X_train, y_train) + clf_ws.set_params(n_estimators=10) + clf_ws.fit(X_train, y_train) + y1 = clf_ws.predict(X_test) + + clf = BaggingClassifier(n_estimators=10, warm_start=False, random_state=3141) + clf.fit(X_train, y_train) + y2 = clf.predict(X_test) + + assert_array_almost_equal(y1, y2) + + +def test_warm_start_with_oob_score_fails(): + # Check using oob_score and warm_start simultaneously fails + X, y = make_hastie_10_2(n_samples=20, random_state=1) + clf = BaggingClassifier(n_estimators=5, warm_start=True, oob_score=True) + with pytest.raises(ValueError): + clf.fit(X, y) + + +def test_oob_score_removed_on_warm_start(): + X, y = make_hastie_10_2(n_samples=100, random_state=1) + + clf = BaggingClassifier(n_estimators=5, oob_score=True) + clf.fit(X, y) + + clf.set_params(warm_start=True, oob_score=False, n_estimators=10) + clf.fit(X, y) + + with pytest.raises(AttributeError): + getattr(clf, "oob_score_") + + +def test_oob_score_consistency(): + # Make sure OOB scores are identical when random_state, estimator, and + # training data are fixed and fitting is done twice + X, y = make_hastie_10_2(n_samples=200, random_state=1) + bagging = BaggingClassifier( + KNeighborsClassifier(), + max_samples=0.5, + max_features=0.5, + oob_score=True, + random_state=1, + ) + assert bagging.fit(X, y).oob_score_ == bagging.fit(X, y).oob_score_ + + +def test_estimators_samples(): + # Check that format of estimators_samples_ is correct and that results + # generated at fit time can be identically reproduced at a later time + # using data saved in object attributes. 
+ X, y = make_hastie_10_2(n_samples=200, random_state=1) + bagging = BaggingClassifier( + LogisticRegression(), + max_samples=0.5, + max_features=0.5, + random_state=1, + bootstrap=False, + ) + bagging.fit(X, y) + + # Get relevant attributes + estimators_samples = bagging.estimators_samples_ + estimators_features = bagging.estimators_features_ + estimators = bagging.estimators_ + + # Test for correct formatting + assert len(estimators_samples) == len(estimators) + assert len(estimators_samples[0]) == len(X) // 2 + assert estimators_samples[0].dtype.kind == "i" + + # Re-fit single estimator to test for consistent sampling + estimator_index = 0 + estimator_samples = estimators_samples[estimator_index] + estimator_features = estimators_features[estimator_index] + estimator = estimators[estimator_index] + + X_train = (X[estimator_samples])[:, estimator_features] + y_train = y[estimator_samples] + + orig_coefs = estimator.coef_ + estimator.fit(X_train, y_train) + new_coefs = estimator.coef_ + + assert_array_almost_equal(orig_coefs, new_coefs) + + +def test_estimators_samples_deterministic(): + # This test is a regression test to check that with a random step + # (e.g. SparseRandomProjection) and a given random state, the results + # generated at fit time can be identically reproduced at a later time using + # data saved in object attributes. Check issue #9524 for full discussion. + + iris = load_iris() + X, y = iris.data, iris.target + + base_pipeline = make_pipeline( + SparseRandomProjection(n_components=2), LogisticRegression() + ) + clf = BaggingClassifier(estimator=base_pipeline, max_samples=0.5, random_state=0) + clf.fit(X, y) + pipeline_estimator_coef = clf.estimators_[0].steps[-1][1].coef_.copy() + + estimator = clf.estimators_[0] + estimator_sample = clf.estimators_samples_[0] + estimator_feature = clf.estimators_features_[0] + + X_train = (X[estimator_sample])[:, estimator_feature] + y_train = y[estimator_sample] + + estimator.fit(X_train, y_train) + assert_array_equal(estimator.steps[-1][1].coef_, pipeline_estimator_coef) + + +def test_max_samples_consistency(): + # Make sure validated max_samples and original max_samples are identical + # when valid integer max_samples supplied by user + max_samples = 100 + X, y = make_hastie_10_2(n_samples=2 * max_samples, random_state=1) + bagging = BaggingClassifier( + KNeighborsClassifier(), + max_samples=max_samples, + max_features=0.5, + random_state=1, + ) + bagging.fit(X, y) + assert bagging._max_samples == max_samples + + +def test_set_oob_score_label_encoding(): + # Make sure the oob_score doesn't change when the labels change + # See: https://github.com/scikit-learn/scikit-learn/issues/8933 + random_state = 5 + X = [[-1], [0], [1]] * 5 + Y1 = ["A", "B", "C"] * 5 + Y2 = [-1, 0, 1] * 5 + Y3 = [0, 1, 2] * 5 + x1 = ( + BaggingClassifier(oob_score=True, random_state=random_state) + .fit(X, Y1) + .oob_score_ + ) + x2 = ( + BaggingClassifier(oob_score=True, random_state=random_state) + .fit(X, Y2) + .oob_score_ + ) + x3 = ( + BaggingClassifier(oob_score=True, random_state=random_state) + .fit(X, Y3) + .oob_score_ + ) + assert [x1, x2] == [x3, x3] + + +def replace(X): + X = X.astype("float", copy=True) + X[~np.isfinite(X)] = 0 + return X + + +def test_bagging_regressor_with_missing_inputs(): + # Check that BaggingRegressor can accept X with missing/infinite data + X = np.array( + [ + [1, 3, 5], + [2, None, 6], + [2, np.nan, 6], + [2, np.inf, 6], + [2, -np.inf, 6], + ] + ) + y_values = [ + np.array([2, 3, 3, 3, 3]), + np.array( + [ + [2, 1, 9], + 
[3, 6, 8], + [3, 6, 8], + [3, 6, 8], + [3, 6, 8], + ] + ), + ] + for y in y_values: + regressor = DecisionTreeRegressor() + pipeline = make_pipeline(FunctionTransformer(replace), regressor) + pipeline.fit(X, y).predict(X) + bagging_regressor = BaggingRegressor(pipeline) + y_hat = bagging_regressor.fit(X, y).predict(X) + assert y.shape == y_hat.shape + + # Verify that exceptions can be raised by wrapper regressor + regressor = DecisionTreeRegressor() + pipeline = make_pipeline(regressor) + with pytest.raises(ValueError): + pipeline.fit(X, y) + bagging_regressor = BaggingRegressor(pipeline) + with pytest.raises(ValueError): + bagging_regressor.fit(X, y) + + +def test_bagging_classifier_with_missing_inputs(): + # Check that BaggingClassifier can accept X with missing/infinite data + X = np.array( + [ + [1, 3, 5], + [2, None, 6], + [2, np.nan, 6], + [2, np.inf, 6], + [2, -np.inf, 6], + ] + ) + y = np.array([3, 6, 6, 6, 6]) + classifier = DecisionTreeClassifier() + pipeline = make_pipeline(FunctionTransformer(replace), classifier) + pipeline.fit(X, y).predict(X) + bagging_classifier = BaggingClassifier(pipeline) + bagging_classifier.fit(X, y) + y_hat = bagging_classifier.predict(X) + assert y.shape == y_hat.shape + bagging_classifier.predict_log_proba(X) + bagging_classifier.predict_proba(X) + + # Verify that exceptions can be raised by wrapper classifier + classifier = DecisionTreeClassifier() + pipeline = make_pipeline(classifier) + with pytest.raises(ValueError): + pipeline.fit(X, y) + bagging_classifier = BaggingClassifier(pipeline) + with pytest.raises(ValueError): + bagging_classifier.fit(X, y) + + +def test_bagging_small_max_features(): + # Check that Bagging estimator can accept low fractional max_features + + X = np.array([[1, 2], [3, 4]]) + y = np.array([1, 0]) + + bagging = BaggingClassifier(LogisticRegression(), max_features=0.3, random_state=1) + bagging.fit(X, y) + + +def test_bagging_get_estimators_indices(): + # Check that Bagging estimator can generate sample indices properly + # Non-regression test for: + # https://github.com/scikit-learn/scikit-learn/issues/16436 + + rng = np.random.RandomState(0) + X = rng.randn(13, 4) + y = np.arange(13) + + class MyEstimator(DecisionTreeRegressor): + """An estimator which stores y indices information at fit.""" + + def fit(self, X, y): + self._sample_indices = y + + clf = BaggingRegressor(estimator=MyEstimator(), n_estimators=1, random_state=0) + clf.fit(X, y) + + assert_array_equal(clf.estimators_[0]._sample_indices, clf.estimators_samples_[0]) + + +@pytest.mark.parametrize( + "bagging, expected_allow_nan", + [ + (BaggingClassifier(HistGradientBoostingClassifier(max_iter=1)), True), + (BaggingRegressor(HistGradientBoostingRegressor(max_iter=1)), True), + (BaggingClassifier(LogisticRegression()), False), + (BaggingRegressor(SVR()), False), + ], +) +def test_bagging_allow_nan_tag(bagging, expected_allow_nan): + """Check that bagging inherits allow_nan tag.""" + assert bagging._get_tags()["allow_nan"] == expected_allow_nan diff --git a/venv/lib/python3.10/site-packages/sklearn/ensemble/tests/test_base.py b/venv/lib/python3.10/site-packages/sklearn/ensemble/tests/test_base.py new file mode 100644 index 0000000000000000000000000000000000000000..aa06edc19e756fa4bb7915071dd5ab911431db10 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/ensemble/tests/test_base.py @@ -0,0 +1,109 @@ +""" +Testing for the base module (sklearn.ensemble.base). 
+""" + +# Authors: Gilles Louppe +# License: BSD 3 clause + +from collections import OrderedDict + +import numpy as np + +from sklearn.datasets import load_iris +from sklearn.discriminant_analysis import LinearDiscriminantAnalysis +from sklearn.ensemble import BaggingClassifier +from sklearn.ensemble._base import _set_random_states +from sklearn.feature_selection import SelectFromModel +from sklearn.linear_model import Perceptron +from sklearn.pipeline import Pipeline + + +def test_base(): + # Check BaseEnsemble methods. + ensemble = BaggingClassifier( + estimator=Perceptron(random_state=None), n_estimators=3 + ) + + iris = load_iris() + ensemble.fit(iris.data, iris.target) + ensemble.estimators_ = [] # empty the list and create estimators manually + + ensemble._make_estimator() + random_state = np.random.RandomState(3) + ensemble._make_estimator(random_state=random_state) + ensemble._make_estimator(random_state=random_state) + ensemble._make_estimator(append=False) + + assert 3 == len(ensemble) + assert 3 == len(ensemble.estimators_) + + assert isinstance(ensemble[0], Perceptron) + assert ensemble[0].random_state is None + assert isinstance(ensemble[1].random_state, int) + assert isinstance(ensemble[2].random_state, int) + assert ensemble[1].random_state != ensemble[2].random_state + + np_int_ensemble = BaggingClassifier( + estimator=Perceptron(), n_estimators=np.int32(3) + ) + np_int_ensemble.fit(iris.data, iris.target) + + +def test_set_random_states(): + # Linear Discriminant Analysis doesn't have random state: smoke test + _set_random_states(LinearDiscriminantAnalysis(), random_state=17) + + clf1 = Perceptron(random_state=None) + assert clf1.random_state is None + # check random_state is None still sets + _set_random_states(clf1, None) + assert isinstance(clf1.random_state, int) + + # check random_state fixes results in consistent initialisation + _set_random_states(clf1, 3) + assert isinstance(clf1.random_state, int) + clf2 = Perceptron(random_state=None) + _set_random_states(clf2, 3) + assert clf1.random_state == clf2.random_state + + # nested random_state + + def make_steps(): + return [ + ("sel", SelectFromModel(Perceptron(random_state=None))), + ("clf", Perceptron(random_state=None)), + ] + + est1 = Pipeline(make_steps()) + _set_random_states(est1, 3) + assert isinstance(est1.steps[0][1].estimator.random_state, int) + assert isinstance(est1.steps[1][1].random_state, int) + assert ( + est1.get_params()["sel__estimator__random_state"] + != est1.get_params()["clf__random_state"] + ) + + # ensure multiple random_state parameters are invariant to get_params() + # iteration order + + class AlphaParamPipeline(Pipeline): + def get_params(self, *args, **kwargs): + params = Pipeline.get_params(self, *args, **kwargs).items() + return OrderedDict(sorted(params)) + + class RevParamPipeline(Pipeline): + def get_params(self, *args, **kwargs): + params = Pipeline.get_params(self, *args, **kwargs).items() + return OrderedDict(sorted(params, reverse=True)) + + for cls in [AlphaParamPipeline, RevParamPipeline]: + est2 = cls(make_steps()) + _set_random_states(est2, 3) + assert ( + est1.get_params()["sel__estimator__random_state"] + == est2.get_params()["sel__estimator__random_state"] + ) + assert ( + est1.get_params()["clf__random_state"] + == est2.get_params()["clf__random_state"] + ) diff --git a/venv/lib/python3.10/site-packages/sklearn/ensemble/tests/test_common.py b/venv/lib/python3.10/site-packages/sklearn/ensemble/tests/test_common.py new file mode 100644 index 
0000000000000000000000000000000000000000..7e14b34993d6fd89bfd473ea9b2ab4ce31adc816 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/ensemble/tests/test_common.py @@ -0,0 +1,262 @@ +import numpy as np +import pytest + +from sklearn.base import ClassifierMixin, clone, is_classifier +from sklearn.datasets import ( + load_diabetes, + load_iris, + make_classification, + make_regression, +) +from sklearn.ensemble import ( + RandomForestClassifier, + RandomForestRegressor, + StackingClassifier, + StackingRegressor, + VotingClassifier, + VotingRegressor, +) +from sklearn.impute import SimpleImputer +from sklearn.linear_model import LinearRegression, LogisticRegression +from sklearn.pipeline import make_pipeline +from sklearn.svm import SVC, SVR, LinearSVC, LinearSVR + +X, y = load_iris(return_X_y=True) + +X_r, y_r = load_diabetes(return_X_y=True) + + +@pytest.mark.parametrize( + "X, y, estimator", + [ + ( + *make_classification(n_samples=10), + StackingClassifier( + estimators=[ + ("lr", LogisticRegression()), + ("svm", LinearSVC(dual="auto")), + ("rf", RandomForestClassifier(n_estimators=5, max_depth=3)), + ], + cv=2, + ), + ), + ( + *make_classification(n_samples=10), + VotingClassifier( + estimators=[ + ("lr", LogisticRegression()), + ("svm", LinearSVC(dual="auto")), + ("rf", RandomForestClassifier(n_estimators=5, max_depth=3)), + ] + ), + ), + ( + *make_regression(n_samples=10), + StackingRegressor( + estimators=[ + ("lr", LinearRegression()), + ("svm", LinearSVR(dual="auto")), + ("rf", RandomForestRegressor(n_estimators=5, max_depth=3)), + ], + cv=2, + ), + ), + ( + *make_regression(n_samples=10), + VotingRegressor( + estimators=[ + ("lr", LinearRegression()), + ("svm", LinearSVR(dual="auto")), + ("rf", RandomForestRegressor(n_estimators=5, max_depth=3)), + ] + ), + ), + ], + ids=[ + "stacking-classifier", + "voting-classifier", + "stacking-regressor", + "voting-regressor", + ], +) +def test_ensemble_heterogeneous_estimators_behavior(X, y, estimator): + # check that the behavior of `estimators`, `estimators_`, + # `named_estimators`, `named_estimators_` is consistent across all + # ensemble classes and when using `set_params()`. 
+ + # before fit + assert "svm" in estimator.named_estimators + assert estimator.named_estimators.svm is estimator.estimators[1][1] + assert estimator.named_estimators.svm is estimator.named_estimators["svm"] + + # check fitted attributes + estimator.fit(X, y) + assert len(estimator.named_estimators) == 3 + assert len(estimator.named_estimators_) == 3 + assert sorted(list(estimator.named_estimators_.keys())) == sorted( + ["lr", "svm", "rf"] + ) + + # check that set_params() does not add a new attribute + estimator_new_params = clone(estimator) + svm_estimator = SVC() if is_classifier(estimator) else SVR() + estimator_new_params.set_params(svm=svm_estimator).fit(X, y) + assert not hasattr(estimator_new_params, "svm") + assert ( + estimator_new_params.named_estimators.lr.get_params() + == estimator.named_estimators.lr.get_params() + ) + assert ( + estimator_new_params.named_estimators.rf.get_params() + == estimator.named_estimators.rf.get_params() + ) + + # check the behavior when setting and dropping an estimator + estimator_dropped = clone(estimator) + estimator_dropped.set_params(svm="drop") + estimator_dropped.fit(X, y) + assert len(estimator_dropped.named_estimators) == 3 + assert estimator_dropped.named_estimators.svm == "drop" + assert len(estimator_dropped.named_estimators_) == 3 + assert sorted(list(estimator_dropped.named_estimators_.keys())) == sorted( + ["lr", "svm", "rf"] + ) + for sub_est in estimator_dropped.named_estimators_: + # check that the correspondence is correct + assert not isinstance(sub_est, type(estimator.named_estimators.svm)) + + # check that we can set the parameters of the underlying classifier + estimator.set_params(svm__C=10.0) + estimator.set_params(rf__max_depth=5) + assert ( + estimator.get_params()["svm__C"] + == estimator.get_params()["svm"].get_params()["C"] + ) + assert ( + estimator.get_params()["rf__max_depth"] + == estimator.get_params()["rf"].get_params()["max_depth"] + ) + + +@pytest.mark.parametrize( + "Ensemble", + [VotingClassifier, StackingRegressor, VotingRegressor], +) +def test_ensemble_heterogeneous_estimators_type(Ensemble): + # check that ensemble will fail during validation if the underlying + # estimators are not of the same type (i.e.
classifier or regressor) + # StackingClassifier can have an underlying regressor so it's not checked + if issubclass(Ensemble, ClassifierMixin): + X, y = make_classification(n_samples=10) + estimators = [("lr", LinearRegression())] + ensemble_type = "classifier" + else: + X, y = make_regression(n_samples=10) + estimators = [("lr", LogisticRegression())] + ensemble_type = "regressor" + ensemble = Ensemble(estimators=estimators) + + err_msg = "should be a {}".format(ensemble_type) + with pytest.raises(ValueError, match=err_msg): + ensemble.fit(X, y) + + +@pytest.mark.parametrize( + "X, y, Ensemble", + [ + (*make_classification(n_samples=10), StackingClassifier), + (*make_classification(n_samples=10), VotingClassifier), + (*make_regression(n_samples=10), StackingRegressor), + (*make_regression(n_samples=10), VotingRegressor), + ], +) +def test_ensemble_heterogeneous_estimators_name_validation(X, y, Ensemble): + # raise an error when the name contains dunder + if issubclass(Ensemble, ClassifierMixin): + estimators = [("lr__", LogisticRegression())] + else: + estimators = [("lr__", LinearRegression())] + ensemble = Ensemble(estimators=estimators) + + err_msg = r"Estimator names must not contain __: got \['lr__'\]" + with pytest.raises(ValueError, match=err_msg): + ensemble.fit(X, y) + + # raise an error when the name is not unique + if issubclass(Ensemble, ClassifierMixin): + estimators = [("lr", LogisticRegression()), ("lr", LogisticRegression())] + else: + estimators = [("lr", LinearRegression()), ("lr", LinearRegression())] + ensemble = Ensemble(estimators=estimators) + + err_msg = r"Names provided are not unique: \['lr', 'lr'\]" + with pytest.raises(ValueError, match=err_msg): + ensemble.fit(X, y) + + # raise an error when the name conflicts with the parameters + if issubclass(Ensemble, ClassifierMixin): + estimators = [("estimators", LogisticRegression())] + else: + estimators = [("estimators", LinearRegression())] + ensemble = Ensemble(estimators=estimators) + + err_msg = "Estimator names conflict with constructor arguments" + with pytest.raises(ValueError, match=err_msg): + ensemble.fit(X, y) + + +@pytest.mark.parametrize( + "X, y, estimator", + [ + ( + *make_classification(n_samples=10), + StackingClassifier(estimators=[("lr", LogisticRegression())]), + ), + ( + *make_classification(n_samples=10), + VotingClassifier(estimators=[("lr", LogisticRegression())]), + ), + ( + *make_regression(n_samples=10), + StackingRegressor(estimators=[("lr", LinearRegression())]), + ), + ( + *make_regression(n_samples=10), + VotingRegressor(estimators=[("lr", LinearRegression())]), + ), + ], + ids=[ + "stacking-classifier", + "voting-classifier", + "stacking-regressor", + "voting-regressor", + ], +) +def test_ensemble_heterogeneous_estimators_all_dropped(X, y, estimator): + # check that we raise a consistent error when all estimators are + # dropped + estimator.set_params(lr="drop") + with pytest.raises(ValueError, match="All estimators are dropped."): + estimator.fit(X, y) + + +@pytest.mark.parametrize( + "Ensemble, Estimator, X, y", + [ + (StackingClassifier, LogisticRegression, X, y), + (StackingRegressor, LinearRegression, X_r, y_r), + (VotingClassifier, LogisticRegression, X, y), + (VotingRegressor, LinearRegression, X_r, y_r), + ], +) +# FIXME: we should move this test in `estimator_checks` once we are able +# to construct meta-estimator instances +def test_heterogeneous_ensemble_support_missing_values(Ensemble, Estimator, X, y): + # check that Voting and Stacking predictors delegate the missing
values + # validation to the underlying estimator. + X = X.copy() + mask = np.random.choice([1, 0], X.shape, p=[0.1, 0.9]).astype(bool) + X[mask] = np.nan + pipe = make_pipeline(SimpleImputer(), Estimator()) + ensemble = Ensemble(estimators=[("pipe1", pipe), ("pipe2", pipe)]) + ensemble.fit(X, y).score(X, y) diff --git a/venv/lib/python3.10/site-packages/sklearn/ensemble/tests/test_forest.py b/venv/lib/python3.10/site-packages/sklearn/ensemble/tests/test_forest.py new file mode 100644 index 0000000000000000000000000000000000000000..2468f8fc5b590aa3b7aa3211002a1af11e2ea8bb --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/ensemble/tests/test_forest.py @@ -0,0 +1,1856 @@ +""" +Testing for the forest module (sklearn.ensemble.forest). +""" + +# Authors: Gilles Louppe, +# Brian Holt, +# Andreas Mueller, +# Arnaud Joly +# License: BSD 3 clause + +import itertools +import math +import pickle +from collections import defaultdict +from functools import partial +from itertools import combinations, product +from typing import Any, Dict +from unittest.mock import patch + +import joblib +import numpy as np +import pytest +from scipy.special import comb + +import sklearn +from sklearn import clone, datasets +from sklearn.datasets import make_classification, make_hastie_10_2 +from sklearn.decomposition import TruncatedSVD +from sklearn.dummy import DummyRegressor +from sklearn.ensemble import ( + ExtraTreesClassifier, + ExtraTreesRegressor, + RandomForestClassifier, + RandomForestRegressor, + RandomTreesEmbedding, +) +from sklearn.ensemble._forest import ( + _generate_unsampled_indices, + _get_n_samples_bootstrap, +) +from sklearn.exceptions import NotFittedError +from sklearn.metrics import ( + explained_variance_score, + f1_score, + mean_poisson_deviance, + mean_squared_error, +) +from sklearn.model_selection import GridSearchCV, cross_val_score, train_test_split +from sklearn.svm import LinearSVC +from sklearn.tree._classes import SPARSE_SPLITTERS +from sklearn.utils._testing import ( + _convert_container, + assert_allclose, + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, + ignore_warnings, + skip_if_no_parallel, +) +from sklearn.utils.fixes import COO_CONTAINERS, CSC_CONTAINERS, CSR_CONTAINERS +from sklearn.utils.multiclass import type_of_target +from sklearn.utils.parallel import Parallel +from sklearn.utils.validation import check_random_state + +# toy sample +X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]] +y = [-1, -1, -1, 1, 1, 1] +T = [[-1, -1], [2, 2], [3, 2]] +true_result = [-1, 1, 1] + +# Larger classification sample used for testing feature importances +X_large, y_large = datasets.make_classification( + n_samples=500, + n_features=10, + n_informative=3, + n_redundant=0, + n_repeated=0, + shuffle=False, + random_state=0, +) + +# also load the iris dataset +# and randomly permute it +iris = datasets.load_iris() +rng = check_random_state(0) +perm = rng.permutation(iris.target.size) +iris.data = iris.data[perm] +iris.target = iris.target[perm] + +# Make regression dataset +X_reg, y_reg = datasets.make_regression(n_samples=500, n_features=10, random_state=1) + +# also make a hastie_10_2 dataset +hastie_X, hastie_y = datasets.make_hastie_10_2(n_samples=20, random_state=1) +hastie_X = hastie_X.astype(np.float32) + +# Get the default backend in joblib to test parallelism and interaction with +# different backends +DEFAULT_JOBLIB_BACKEND = joblib.parallel.get_active_backend()[0].__class__ + +FOREST_CLASSIFIERS = { + "ExtraTreesClassifier": 
ExtraTreesClassifier, + "RandomForestClassifier": RandomForestClassifier, +} + +FOREST_REGRESSORS = { + "ExtraTreesRegressor": ExtraTreesRegressor, + "RandomForestRegressor": RandomForestRegressor, +} + +FOREST_TRANSFORMERS = { + "RandomTreesEmbedding": RandomTreesEmbedding, +} + +FOREST_ESTIMATORS: Dict[str, Any] = dict() +FOREST_ESTIMATORS.update(FOREST_CLASSIFIERS) +FOREST_ESTIMATORS.update(FOREST_REGRESSORS) +FOREST_ESTIMATORS.update(FOREST_TRANSFORMERS) + +FOREST_CLASSIFIERS_REGRESSORS: Dict[str, Any] = FOREST_CLASSIFIERS.copy() +FOREST_CLASSIFIERS_REGRESSORS.update(FOREST_REGRESSORS) + + +@pytest.mark.parametrize("name", FOREST_CLASSIFIERS) +def test_classification_toy(name): + """Check classification on a toy dataset.""" + ForestClassifier = FOREST_CLASSIFIERS[name] + + clf = ForestClassifier(n_estimators=10, random_state=1) + clf.fit(X, y) + assert_array_equal(clf.predict(T), true_result) + assert 10 == len(clf) + + clf = ForestClassifier(n_estimators=10, max_features=1, random_state=1) + clf.fit(X, y) + assert_array_equal(clf.predict(T), true_result) + assert 10 == len(clf) + + # also test apply + leaf_indices = clf.apply(X) + assert leaf_indices.shape == (len(X), clf.n_estimators) + + +@pytest.mark.parametrize("name", FOREST_CLASSIFIERS) +@pytest.mark.parametrize("criterion", ("gini", "log_loss")) +def test_iris_criterion(name, criterion): + # Check consistency on dataset iris. + ForestClassifier = FOREST_CLASSIFIERS[name] + + clf = ForestClassifier(n_estimators=10, criterion=criterion, random_state=1) + clf.fit(iris.data, iris.target) + score = clf.score(iris.data, iris.target) + assert score > 0.9, "Failed with criterion %s and score = %f" % (criterion, score) + + clf = ForestClassifier( + n_estimators=10, criterion=criterion, max_features=2, random_state=1 + ) + clf.fit(iris.data, iris.target) + score = clf.score(iris.data, iris.target) + assert score > 0.5, "Failed with criterion %s and score = %f" % (criterion, score) + + +@pytest.mark.parametrize("name", FOREST_REGRESSORS) +@pytest.mark.parametrize( + "criterion", ("squared_error", "absolute_error", "friedman_mse") +) +def test_regression_criterion(name, criterion): + # Check consistency on regression dataset. + ForestRegressor = FOREST_REGRESSORS[name] + + reg = ForestRegressor(n_estimators=5, criterion=criterion, random_state=1) + reg.fit(X_reg, y_reg) + score = reg.score(X_reg, y_reg) + assert ( + score > 0.93 + ), "Failed with max_features=None, criterion %s and score = %f" % ( + criterion, + score, + ) + + reg = ForestRegressor( + n_estimators=5, criterion=criterion, max_features=6, random_state=1 + ) + reg.fit(X_reg, y_reg) + score = reg.score(X_reg, y_reg) + assert score > 0.92, "Failed with max_features=6, criterion %s and score = %f" % ( + criterion, + score, + ) + + +def test_poisson_vs_mse(): + """Test that random forest with poisson criterion performs better than + mse for a poisson target. + + There is a similar test for DecisionTreeRegressor. + """ + rng = np.random.RandomState(42) + n_train, n_test, n_features = 500, 500, 10 + X = datasets.make_low_rank_matrix( + n_samples=n_train + n_test, n_features=n_features, random_state=rng + ) + # We create a log-linear Poisson model and downscale coef as it will get + # exponentiated. + coef = rng.uniform(low=-2, high=2, size=n_features) / np.max(X, axis=0) + y = rng.poisson(lam=np.exp(X @ coef)) + X_train, X_test, y_train, y_test = train_test_split( + X, y, test_size=n_test, random_state=rng + ) + # We prevent some overfitting by setting min_samples_split=10. 
+ forest_poi = RandomForestRegressor( + criterion="poisson", min_samples_leaf=10, max_features="sqrt", random_state=rng + ) + forest_mse = RandomForestRegressor( + criterion="squared_error", + min_samples_leaf=10, + max_features="sqrt", + random_state=rng, + ) + + forest_poi.fit(X_train, y_train) + forest_mse.fit(X_train, y_train) + dummy = DummyRegressor(strategy="mean").fit(X_train, y_train) + + for X, y, data_name in [(X_train, y_train, "train"), (X_test, y_test, "test")]: + metric_poi = mean_poisson_deviance(y, forest_poi.predict(X)) + # squared_error forest might produce non-positive predictions => clip + # If y = 0 for those, the poisson deviance gets too good. + # If we drew more samples, we would eventually get y > 0 and the + # poisson deviance would explode, i.e. be undefined. Therefore, we do + # not clip to a tiny value like 1e-15, but to 1e-6. This acts like a + # small penalty to the non-positive predictions. + metric_mse = mean_poisson_deviance( + y, np.clip(forest_mse.predict(X), 1e-6, None) + ) + metric_dummy = mean_poisson_deviance(y, dummy.predict(X)) + # As squared_error might correctly predict 0 in train set, its train + # score can be better than Poisson. This is no longer the case for the + # test set. But keep the above comment for clipping in mind. + if data_name == "test": + assert metric_poi < metric_mse + assert metric_poi < 0.8 * metric_dummy + + +@pytest.mark.parametrize("criterion", ("poisson", "squared_error")) +def test_balance_property_random_forest(criterion): + """Test that sum(y_pred)==sum(y_true) on the training set.""" + rng = np.random.RandomState(42) + n_train, n_test, n_features = 500, 500, 10 + X = datasets.make_low_rank_matrix( + n_samples=n_train + n_test, n_features=n_features, random_state=rng + ) + + coef = rng.uniform(low=-2, high=2, size=n_features) / np.max(X, axis=0) + y = rng.poisson(lam=np.exp(X @ coef)) + + reg = RandomForestRegressor( + criterion=criterion, n_estimators=10, bootstrap=False, random_state=rng + ) + reg.fit(X, y) + + assert np.sum(reg.predict(X)) == pytest.approx(np.sum(y)) + + +@pytest.mark.parametrize("name", FOREST_REGRESSORS) +def test_regressor_attributes(name): + # Regression models should not have a classes_ attribute. + r = FOREST_REGRESSORS[name](random_state=0) + assert not hasattr(r, "classes_") + assert not hasattr(r, "n_classes_") + + r.fit([[1, 2, 3], [4, 5, 6]], [1, 2]) + assert not hasattr(r, "classes_") + assert not hasattr(r, "n_classes_") + + +@pytest.mark.parametrize("name", FOREST_CLASSIFIERS) +def test_probability(name): + # Predict probabilities.
+ ForestClassifier = FOREST_CLASSIFIERS[name] + with np.errstate(divide="ignore"): + clf = ForestClassifier( + n_estimators=10, random_state=1, max_features=1, max_depth=1 + ) + clf.fit(iris.data, iris.target) + assert_array_almost_equal( + np.sum(clf.predict_proba(iris.data), axis=1), np.ones(iris.data.shape[0]) + ) + assert_array_almost_equal( + clf.predict_proba(iris.data), np.exp(clf.predict_log_proba(iris.data)) + ) + + +@pytest.mark.parametrize("dtype", (np.float64, np.float32)) +@pytest.mark.parametrize( + "name, criterion", + itertools.chain( + product(FOREST_CLASSIFIERS, ["gini", "log_loss"]), + product(FOREST_REGRESSORS, ["squared_error", "friedman_mse", "absolute_error"]), + ), +) +def test_importances(dtype, name, criterion): + tolerance = 0.01 + if name in FOREST_REGRESSORS and criterion == "absolute_error": + tolerance = 0.05 + + # cast as dtype + X = X_large.astype(dtype, copy=False) + y = y_large.astype(dtype, copy=False) + + ForestEstimator = FOREST_ESTIMATORS[name] + + est = ForestEstimator(n_estimators=10, criterion=criterion, random_state=0) + est.fit(X, y) + importances = est.feature_importances_ + + # The forest estimator can detect that only the first 3 features of the + # dataset are informative: + n_important = np.sum(importances > 0.1) + assert importances.shape[0] == 10 + assert n_important == 3 + assert np.all(importances[:3] > 0.1) + + # Check with parallel + importances = est.feature_importances_ + est.set_params(n_jobs=2) + importances_parallel = est.feature_importances_ + assert_array_almost_equal(importances, importances_parallel) + + # Check with sample weights + sample_weight = check_random_state(0).randint(1, 10, len(X)) + est = ForestEstimator(n_estimators=10, random_state=0, criterion=criterion) + est.fit(X, y, sample_weight=sample_weight) + importances = est.feature_importances_ + assert np.all(importances >= 0.0) + + for scale in [0.5, 100]: + est = ForestEstimator(n_estimators=10, random_state=0, criterion=criterion) + est.fit(X, y, sample_weight=scale * sample_weight) + importances_bis = est.feature_importances_ + assert np.abs(importances - importances_bis).mean() < tolerance + + +def test_importances_asymptotic(): + # Check whether variable importances of totally randomized trees + # converge towards their theoretical values (See Louppe et al, + # Understanding variable importances in forests of randomized trees, 2013). 
+ + def binomial(k, n): + return 0 if k < 0 or k > n else comb(int(n), int(k), exact=True) + + def entropy(samples): + n_samples = len(samples) + entropy = 0.0 + + for count in np.bincount(samples): + p = 1.0 * count / n_samples + if p > 0: + entropy -= p * np.log2(p) + + return entropy + + def mdi_importance(X_m, X, y): + n_samples, n_features = X.shape + + features = list(range(n_features)) + features.pop(X_m) + values = [np.unique(X[:, i]) for i in range(n_features)] + + imp = 0.0 + + for k in range(n_features): + # Weight of each B of size k + coef = 1.0 / (binomial(k, n_features) * (n_features - k)) + + # For all B of size k + for B in combinations(features, k): + # For all values B=b + for b in product(*[values[B[j]] for j in range(k)]): + mask_b = np.ones(n_samples, dtype=bool) + + for j in range(k): + mask_b &= X[:, B[j]] == b[j] + + X_, y_ = X[mask_b, :], y[mask_b] + n_samples_b = len(X_) + + if n_samples_b > 0: + children = [] + + for xi in values[X_m]: + mask_xi = X_[:, X_m] == xi + children.append(y_[mask_xi]) + + imp += ( + coef + * (1.0 * n_samples_b / n_samples) # P(B=b) + * ( + entropy(y_) + - sum( + [ + entropy(c) * len(c) / n_samples_b + for c in children + ] + ) + ) + ) + + return imp + + data = np.array( + [ + [0, 0, 1, 0, 0, 1, 0, 1], + [1, 0, 1, 1, 1, 0, 1, 2], + [1, 0, 1, 1, 0, 1, 1, 3], + [0, 1, 1, 1, 0, 1, 0, 4], + [1, 1, 0, 1, 0, 1, 1, 5], + [1, 1, 0, 1, 1, 1, 1, 6], + [1, 0, 1, 0, 0, 1, 0, 7], + [1, 1, 1, 1, 1, 1, 1, 8], + [1, 1, 1, 1, 0, 1, 1, 9], + [1, 1, 1, 0, 1, 1, 1, 0], + ] + ) + + X, y = np.array(data[:, :7], dtype=bool), data[:, 7] + n_features = X.shape[1] + + # Compute true importances + true_importances = np.zeros(n_features) + + for i in range(n_features): + true_importances[i] = mdi_importance(i, X, y) + + # Estimate importances with totally randomized trees + clf = ExtraTreesClassifier( + n_estimators=500, max_features=1, criterion="log_loss", random_state=0 + ).fit(X, y) + + importances = ( + sum( + tree.tree_.compute_feature_importances(normalize=False) + for tree in clf.estimators_ + ) + / clf.n_estimators + ) + + # Check correctness + assert_almost_equal(entropy(y), sum(importances)) + assert np.abs(true_importances - importances).mean() < 0.01 + + +@pytest.mark.parametrize("name", FOREST_ESTIMATORS) +def test_unfitted_feature_importances(name): + err_msg = ( + "This {} instance is not fitted yet. 
Call 'fit' with " + "appropriate arguments before using this estimator.".format(name) + ) + with pytest.raises(NotFittedError, match=err_msg): + getattr(FOREST_ESTIMATORS[name](), "feature_importances_") + + +@pytest.mark.parametrize("ForestClassifier", FOREST_CLASSIFIERS.values()) +@pytest.mark.parametrize("X_type", ["array", "sparse_csr", "sparse_csc"]) +@pytest.mark.parametrize( + "X, y, lower_bound_accuracy", + [ + ( + *datasets.make_classification(n_samples=300, n_classes=2, random_state=0), + 0.9, + ), + ( + *datasets.make_classification( + n_samples=1000, n_classes=3, n_informative=6, random_state=0 + ), + 0.65, + ), + ( + iris.data, + iris.target * 2 + 1, + 0.65, + ), + ( + *datasets.make_multilabel_classification(n_samples=300, random_state=0), + 0.18, + ), + ], +) +@pytest.mark.parametrize("oob_score", [True, partial(f1_score, average="micro")]) +def test_forest_classifier_oob( + ForestClassifier, X, y, X_type, lower_bound_accuracy, oob_score +): + """Check that OOB score is close to score on a test set.""" + X = _convert_container(X, constructor_name=X_type) + X_train, X_test, y_train, y_test = train_test_split( + X, + y, + test_size=0.5, + random_state=0, + ) + classifier = ForestClassifier( + n_estimators=40, + bootstrap=True, + oob_score=oob_score, + random_state=0, + ) + + assert not hasattr(classifier, "oob_score_") + assert not hasattr(classifier, "oob_decision_function_") + + classifier.fit(X_train, y_train) + if callable(oob_score): + test_score = oob_score(y_test, classifier.predict(X_test)) + else: + test_score = classifier.score(X_test, y_test) + assert classifier.oob_score_ >= lower_bound_accuracy + + assert abs(test_score - classifier.oob_score_) <= 0.1 + + assert hasattr(classifier, "oob_score_") + assert not hasattr(classifier, "oob_prediction_") + assert hasattr(classifier, "oob_decision_function_") + + if y.ndim == 1: + expected_shape = (X_train.shape[0], len(set(y))) + else: + expected_shape = (X_train.shape[0], len(set(y[:, 0])), y.shape[1]) + assert classifier.oob_decision_function_.shape == expected_shape + + +@pytest.mark.parametrize("ForestRegressor", FOREST_REGRESSORS.values()) +@pytest.mark.parametrize("X_type", ["array", "sparse_csr", "sparse_csc"]) +@pytest.mark.parametrize( + "X, y, lower_bound_r2", + [ + ( + *datasets.make_regression( + n_samples=500, n_features=10, n_targets=1, random_state=0 + ), + 0.7, + ), + ( + *datasets.make_regression( + n_samples=500, n_features=10, n_targets=2, random_state=0 + ), + 0.55, + ), + ], +) +@pytest.mark.parametrize("oob_score", [True, explained_variance_score]) +def test_forest_regressor_oob(ForestRegressor, X, y, X_type, lower_bound_r2, oob_score): + """Check that forest-based regressor provide an OOB score close to the + score on a test set.""" + X = _convert_container(X, constructor_name=X_type) + X_train, X_test, y_train, y_test = train_test_split( + X, + y, + test_size=0.5, + random_state=0, + ) + regressor = ForestRegressor( + n_estimators=50, + bootstrap=True, + oob_score=oob_score, + random_state=0, + ) + + assert not hasattr(regressor, "oob_score_") + assert not hasattr(regressor, "oob_prediction_") + + regressor.fit(X_train, y_train) + if callable(oob_score): + test_score = oob_score(y_test, regressor.predict(X_test)) + else: + test_score = regressor.score(X_test, y_test) + assert regressor.oob_score_ >= lower_bound_r2 + + assert abs(test_score - regressor.oob_score_) <= 0.1 + + assert hasattr(regressor, "oob_score_") + assert hasattr(regressor, "oob_prediction_") + assert not hasattr(regressor, 
"oob_decision_function_") + + if y.ndim == 1: + expected_shape = (X_train.shape[0],) + else: + expected_shape = (X_train.shape[0], y.ndim) + assert regressor.oob_prediction_.shape == expected_shape + + +@pytest.mark.parametrize("ForestEstimator", FOREST_CLASSIFIERS_REGRESSORS.values()) +def test_forest_oob_warning(ForestEstimator): + """Check that a warning is raised when not enough estimator and the OOB + estimates will be inaccurate.""" + estimator = ForestEstimator( + n_estimators=1, + oob_score=True, + bootstrap=True, + random_state=0, + ) + with pytest.warns(UserWarning, match="Some inputs do not have OOB scores"): + estimator.fit(iris.data, iris.target) + + +@pytest.mark.parametrize("ForestEstimator", FOREST_CLASSIFIERS_REGRESSORS.values()) +def test_forest_oob_score_requires_bootstrap(ForestEstimator): + """Check that we raise an error if OOB score is requested without + activating bootstrapping. + """ + X = iris.data + y = iris.target + err_msg = "Out of bag estimation only available if bootstrap=True" + estimator = ForestEstimator(oob_score=True, bootstrap=False) + with pytest.raises(ValueError, match=err_msg): + estimator.fit(X, y) + + +@pytest.mark.parametrize("ForestClassifier", FOREST_CLASSIFIERS.values()) +def test_classifier_error_oob_score_multiclass_multioutput(ForestClassifier): + """Check that we raise an error with when requesting OOB score with + multiclass-multioutput classification target. + """ + rng = np.random.RandomState(42) + X = iris.data + y = rng.randint(low=0, high=5, size=(iris.data.shape[0], 2)) + y_type = type_of_target(y) + assert y_type == "multiclass-multioutput" + estimator = ForestClassifier(oob_score=True, bootstrap=True) + err_msg = "The type of target cannot be used to compute OOB estimates" + with pytest.raises(ValueError, match=err_msg): + estimator.fit(X, y) + + +@pytest.mark.parametrize("ForestRegressor", FOREST_REGRESSORS.values()) +def test_forest_multioutput_integral_regression_target(ForestRegressor): + """Check that multioutput regression with integral values is not interpreted + as a multiclass-multioutput target and OOB score can be computed. 
+ """ + rng = np.random.RandomState(42) + X = iris.data + y = rng.randint(low=0, high=10, size=(iris.data.shape[0], 2)) + estimator = ForestRegressor( + n_estimators=30, oob_score=True, bootstrap=True, random_state=0 + ) + estimator.fit(X, y) + + n_samples_bootstrap = _get_n_samples_bootstrap(len(X), estimator.max_samples) + n_samples_test = X.shape[0] // 4 + oob_pred = np.zeros([n_samples_test, 2]) + for sample_idx, sample in enumerate(X[:n_samples_test]): + n_samples_oob = 0 + oob_pred_sample = np.zeros(2) + for tree in estimator.estimators_: + oob_unsampled_indices = _generate_unsampled_indices( + tree.random_state, len(X), n_samples_bootstrap + ) + if sample_idx in oob_unsampled_indices: + n_samples_oob += 1 + oob_pred_sample += tree.predict(sample.reshape(1, -1)).squeeze() + oob_pred[sample_idx] = oob_pred_sample / n_samples_oob + assert_allclose(oob_pred, estimator.oob_prediction_[:n_samples_test]) + + +@pytest.mark.parametrize("oob_score", [True, False]) +def test_random_trees_embedding_raise_error_oob(oob_score): + with pytest.raises(TypeError, match="got an unexpected keyword argument"): + RandomTreesEmbedding(oob_score=oob_score) + with pytest.raises(NotImplementedError, match="OOB score not supported"): + RandomTreesEmbedding()._set_oob_score_and_attributes(X, y) + + +@pytest.mark.parametrize("name", FOREST_CLASSIFIERS) +def test_gridsearch(name): + # Check that base trees can be grid-searched. + forest = FOREST_CLASSIFIERS[name]() + clf = GridSearchCV(forest, {"n_estimators": (1, 2), "max_depth": (1, 2)}) + clf.fit(iris.data, iris.target) + + +@pytest.mark.parametrize("name", FOREST_CLASSIFIERS_REGRESSORS) +def test_parallel(name): + """Check parallel computations in classification""" + if name in FOREST_CLASSIFIERS: + X = iris.data + y = iris.target + elif name in FOREST_REGRESSORS: + X = X_reg + y = y_reg + + ForestEstimator = FOREST_ESTIMATORS[name] + forest = ForestEstimator(n_estimators=10, n_jobs=3, random_state=0) + + forest.fit(X, y) + assert len(forest) == 10 + + forest.set_params(n_jobs=1) + y1 = forest.predict(X) + forest.set_params(n_jobs=2) + y2 = forest.predict(X) + assert_array_almost_equal(y1, y2, 3) + + +@pytest.mark.parametrize("name", FOREST_CLASSIFIERS_REGRESSORS) +def test_pickle(name): + # Check pickability. + if name in FOREST_CLASSIFIERS: + X = iris.data[::2] + y = iris.target[::2] + elif name in FOREST_REGRESSORS: + X = X_reg[::2] + y = y_reg[::2] + + ForestEstimator = FOREST_ESTIMATORS[name] + obj = ForestEstimator(random_state=0) + obj.fit(X, y) + score = obj.score(X, y) + pickle_object = pickle.dumps(obj) + + obj2 = pickle.loads(pickle_object) + assert type(obj2) == obj.__class__ + score2 = obj2.score(X, y) + assert score == score2 + + +@pytest.mark.parametrize("name", FOREST_CLASSIFIERS_REGRESSORS) +def test_multioutput(name): + # Check estimators on multi-output problems. 
+ + X_train = [ + [-2, -1], + [-1, -1], + [-1, -2], + [1, 1], + [1, 2], + [2, 1], + [-2, 1], + [-1, 1], + [-1, 2], + [2, -1], + [1, -1], + [1, -2], + ] + y_train = [ + [-1, 0], + [-1, 0], + [-1, 0], + [1, 1], + [1, 1], + [1, 1], + [-1, 2], + [-1, 2], + [-1, 2], + [1, 3], + [1, 3], + [1, 3], + ] + X_test = [[-1, -1], [1, 1], [-1, 1], [1, -1]] + y_test = [[-1, 0], [1, 1], [-1, 2], [1, 3]] + + est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False) + y_pred = est.fit(X_train, y_train).predict(X_test) + assert_array_almost_equal(y_pred, y_test) + + if name in FOREST_CLASSIFIERS: + with np.errstate(divide="ignore"): + proba = est.predict_proba(X_test) + assert len(proba) == 2 + assert proba[0].shape == (4, 2) + assert proba[1].shape == (4, 4) + + log_proba = est.predict_log_proba(X_test) + assert len(log_proba) == 2 + assert log_proba[0].shape == (4, 2) + assert log_proba[1].shape == (4, 4) + + +@pytest.mark.parametrize("name", FOREST_CLASSIFIERS) +def test_multioutput_string(name): + # Check estimators on multi-output problems with string outputs. + + X_train = [ + [-2, -1], + [-1, -1], + [-1, -2], + [1, 1], + [1, 2], + [2, 1], + [-2, 1], + [-1, 1], + [-1, 2], + [2, -1], + [1, -1], + [1, -2], + ] + y_train = [ + ["red", "blue"], + ["red", "blue"], + ["red", "blue"], + ["green", "green"], + ["green", "green"], + ["green", "green"], + ["red", "purple"], + ["red", "purple"], + ["red", "purple"], + ["green", "yellow"], + ["green", "yellow"], + ["green", "yellow"], + ] + X_test = [[-1, -1], [1, 1], [-1, 1], [1, -1]] + y_test = [ + ["red", "blue"], + ["green", "green"], + ["red", "purple"], + ["green", "yellow"], + ] + + est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False) + y_pred = est.fit(X_train, y_train).predict(X_test) + assert_array_equal(y_pred, y_test) + + with np.errstate(divide="ignore"): + proba = est.predict_proba(X_test) + assert len(proba) == 2 + assert proba[0].shape == (4, 2) + assert proba[1].shape == (4, 4) + + log_proba = est.predict_log_proba(X_test) + assert len(log_proba) == 2 + assert log_proba[0].shape == (4, 2) + assert log_proba[1].shape == (4, 4) + + +@pytest.mark.parametrize("name", FOREST_CLASSIFIERS) +def test_classes_shape(name): + # Test that n_classes_ and classes_ have proper shape. + ForestClassifier = FOREST_CLASSIFIERS[name] + + # Classification, single output + clf = ForestClassifier(random_state=0).fit(X, y) + + assert clf.n_classes_ == 2 + assert_array_equal(clf.classes_, [-1, 1]) + + # Classification, multi-output + _y = np.vstack((y, np.array(y) * 2)).T + clf = ForestClassifier(random_state=0).fit(X, _y) + + assert_array_equal(clf.n_classes_, [2, 2]) + assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]]) + + +def test_random_trees_dense_type(): + # Test that the `sparse_output` parameter of RandomTreesEmbedding + # works by returning a dense array. + + # Create the RTE with sparse=False + hasher = RandomTreesEmbedding(n_estimators=10, sparse_output=False) + X, y = datasets.make_circles(factor=0.5) + X_transformed = hasher.fit_transform(X) + + # Assert that type is ndarray, not scipy.sparse.csr_matrix + assert isinstance(X_transformed, np.ndarray) + + +def test_random_trees_dense_equal(): + # Test that the `sparse_output` parameter of RandomTreesEmbedding + # works by returning the same array for both argument values. 
+ + # Create the RTEs + hasher_dense = RandomTreesEmbedding( + n_estimators=10, sparse_output=False, random_state=0 + ) + hasher_sparse = RandomTreesEmbedding( + n_estimators=10, sparse_output=True, random_state=0 + ) + X, y = datasets.make_circles(factor=0.5) + X_transformed_dense = hasher_dense.fit_transform(X) + X_transformed_sparse = hasher_sparse.fit_transform(X) + + # Assert that dense and sparse hashers have same array. + assert_array_equal(X_transformed_sparse.toarray(), X_transformed_dense) + + +# Ignore warnings from switching to more power iterations in randomized_svd +@ignore_warnings +def test_random_hasher(): + # test random forest hashing on circles dataset + # make sure that it is linearly separable. + # even after projected to two SVD dimensions + # Note: Not all random_states produce perfect results. + hasher = RandomTreesEmbedding(n_estimators=30, random_state=1) + X, y = datasets.make_circles(factor=0.5) + X_transformed = hasher.fit_transform(X) + + # test fit and transform: + hasher = RandomTreesEmbedding(n_estimators=30, random_state=1) + assert_array_equal(hasher.fit(X).transform(X).toarray(), X_transformed.toarray()) + + # one leaf active per data point per forest + assert X_transformed.shape[0] == X.shape[0] + assert_array_equal(X_transformed.sum(axis=1), hasher.n_estimators) + svd = TruncatedSVD(n_components=2) + X_reduced = svd.fit_transform(X_transformed) + linear_clf = LinearSVC() + linear_clf.fit(X_reduced, y) + assert linear_clf.score(X_reduced, y) == 1.0 + + +@pytest.mark.parametrize("csc_container", CSC_CONTAINERS) +def test_random_hasher_sparse_data(csc_container): + X, y = datasets.make_multilabel_classification(random_state=0) + hasher = RandomTreesEmbedding(n_estimators=30, random_state=1) + X_transformed = hasher.fit_transform(X) + X_transformed_sparse = hasher.fit_transform(csc_container(X)) + assert_array_equal(X_transformed_sparse.toarray(), X_transformed.toarray()) + + +def test_parallel_train(): + rng = check_random_state(12321) + n_samples, n_features = 80, 30 + X_train = rng.randn(n_samples, n_features) + y_train = rng.randint(0, 2, n_samples) + + clfs = [ + RandomForestClassifier(n_estimators=20, n_jobs=n_jobs, random_state=12345).fit( + X_train, y_train + ) + for n_jobs in [1, 2, 3, 8, 16, 32] + ] + + X_test = rng.randn(n_samples, n_features) + probas = [clf.predict_proba(X_test) for clf in clfs] + for proba1, proba2 in zip(probas, probas[1:]): + assert_array_almost_equal(proba1, proba2) + + +def test_distribution(): + rng = check_random_state(12321) + + # Single variable with 4 values + X = rng.randint(0, 4, size=(1000, 1)) + y = rng.rand(1000) + n_trees = 500 + + reg = ExtraTreesRegressor(n_estimators=n_trees, random_state=42).fit(X, y) + + uniques = defaultdict(int) + for tree in reg.estimators_: + tree = "".join( + ("%d,%d/" % (f, int(t)) if f >= 0 else "-") + for f, t in zip(tree.tree_.feature, tree.tree_.threshold) + ) + + uniques[tree] += 1 + + uniques = sorted([(1.0 * count / n_trees, tree) for tree, count in uniques.items()]) + + # On a single variable problem where X_0 has 4 equiprobable values, there + # are 5 ways to build a random tree. The more compact (0,1/0,0/--0,2/--) of + # them has probability 1/3 while the 4 others have probability 1/6. + + assert len(uniques) == 5 + assert 0.20 > uniques[0][0] # Rough approximation of 1/6. 
+ assert 0.20 > uniques[1][0] + assert 0.20 > uniques[2][0] + assert 0.20 > uniques[3][0] + assert uniques[4][0] > 0.3 + assert uniques[4][1] == "0,1/0,0/--0,2/--" + + # Two variables, one with 2 values, one with 3 values + X = np.empty((1000, 2)) + X[:, 0] = np.random.randint(0, 2, 1000) + X[:, 1] = np.random.randint(0, 3, 1000) + y = rng.rand(1000) + + reg = ExtraTreesRegressor(max_features=1, random_state=1).fit(X, y) + + uniques = defaultdict(int) + for tree in reg.estimators_: + tree = "".join( + ("%d,%d/" % (f, int(t)) if f >= 0 else "-") + for f, t in zip(tree.tree_.feature, tree.tree_.threshold) + ) + + uniques[tree] += 1 + + uniques = [(count, tree) for tree, count in uniques.items()] + assert len(uniques) == 8 + + +@pytest.mark.parametrize("name", FOREST_ESTIMATORS) +def test_max_leaf_nodes_max_depth(name): + X, y = hastie_X, hastie_y + + # Test precedence of max_leaf_nodes over max_depth. + ForestEstimator = FOREST_ESTIMATORS[name] + est = ForestEstimator( + max_depth=1, max_leaf_nodes=4, n_estimators=1, random_state=0 + ).fit(X, y) + assert est.estimators_[0].get_depth() == 1 + + est = ForestEstimator(max_depth=1, n_estimators=1, random_state=0).fit(X, y) + assert est.estimators_[0].get_depth() == 1 + + +@pytest.mark.parametrize("name", FOREST_ESTIMATORS) +def test_min_samples_split(name): + X, y = hastie_X, hastie_y + ForestEstimator = FOREST_ESTIMATORS[name] + + est = ForestEstimator(min_samples_split=10, n_estimators=1, random_state=0) + est.fit(X, y) + node_idx = est.estimators_[0].tree_.children_left != -1 + node_samples = est.estimators_[0].tree_.n_node_samples[node_idx] + + assert np.min(node_samples) > len(X) * 0.5 - 1, "Failed with {0}".format(name) + + est = ForestEstimator(min_samples_split=0.5, n_estimators=1, random_state=0) + est.fit(X, y) + node_idx = est.estimators_[0].tree_.children_left != -1 + node_samples = est.estimators_[0].tree_.n_node_samples[node_idx] + + assert np.min(node_samples) > len(X) * 0.5 - 1, "Failed with {0}".format(name) + + +@pytest.mark.parametrize("name", FOREST_ESTIMATORS) +def test_min_samples_leaf(name): + X, y = hastie_X, hastie_y + + # Test if leaves contain more than leaf_count training examples + ForestEstimator = FOREST_ESTIMATORS[name] + + est = ForestEstimator(min_samples_leaf=5, n_estimators=1, random_state=0) + est.fit(X, y) + out = est.estimators_[0].tree_.apply(X) + node_counts = np.bincount(out) + # drop inner nodes + leaf_count = node_counts[node_counts != 0] + assert np.min(leaf_count) > 4, "Failed with {0}".format(name) + + est = ForestEstimator(min_samples_leaf=0.25, n_estimators=1, random_state=0) + est.fit(X, y) + out = est.estimators_[0].tree_.apply(X) + node_counts = np.bincount(out) + # drop inner nodes + leaf_count = node_counts[node_counts != 0] + assert np.min(leaf_count) > len(X) * 0.25 - 1, "Failed with {0}".format(name) + + +@pytest.mark.parametrize("name", FOREST_ESTIMATORS) +def test_min_weight_fraction_leaf(name): + X, y = hastie_X, hastie_y + + # Test if leaves contain at least min_weight_fraction_leaf of the + # training set + ForestEstimator = FOREST_ESTIMATORS[name] + rng = np.random.RandomState(0) + weights = rng.rand(X.shape[0]) + total_weight = np.sum(weights) + + # test both DepthFirstTreeBuilder and BestFirstTreeBuilder + # by setting max_leaf_nodes + for frac in np.linspace(0, 0.5, 6): + est = ForestEstimator( + min_weight_fraction_leaf=frac, n_estimators=1, random_state=0 + ) + if "RandomForest" in name: + est.bootstrap = False + + est.fit(X, y, sample_weight=weights) + out = 
est.estimators_[0].tree_.apply(X) + node_weights = np.bincount(out, weights=weights) + # drop inner nodes + leaf_weights = node_weights[node_weights != 0] + assert ( + np.min(leaf_weights) >= total_weight * est.min_weight_fraction_leaf + ), "Failed with {0} min_weight_fraction_leaf={1}".format( + name, est.min_weight_fraction_leaf + ) + + +@pytest.mark.parametrize("name", FOREST_ESTIMATORS) +@pytest.mark.parametrize( + "sparse_container", COO_CONTAINERS + CSC_CONTAINERS + CSR_CONTAINERS +) +def test_sparse_input(name, sparse_container): + X, y = datasets.make_multilabel_classification(random_state=0, n_samples=50) + + ForestEstimator = FOREST_ESTIMATORS[name] + + dense = ForestEstimator(random_state=0, max_depth=2).fit(X, y) + sparse = ForestEstimator(random_state=0, max_depth=2).fit(sparse_container(X), y) + + assert_array_almost_equal(sparse.apply(X), dense.apply(X)) + + if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS: + assert_array_almost_equal(sparse.predict(X), dense.predict(X)) + assert_array_almost_equal( + sparse.feature_importances_, dense.feature_importances_ + ) + + if name in FOREST_CLASSIFIERS: + assert_array_almost_equal(sparse.predict_proba(X), dense.predict_proba(X)) + assert_array_almost_equal( + sparse.predict_log_proba(X), dense.predict_log_proba(X) + ) + + if name in FOREST_TRANSFORMERS: + assert_array_almost_equal( + sparse.transform(X).toarray(), dense.transform(X).toarray() + ) + assert_array_almost_equal( + sparse.fit_transform(X).toarray(), dense.fit_transform(X).toarray() + ) + + +@pytest.mark.parametrize("name", FOREST_CLASSIFIERS_REGRESSORS) +@pytest.mark.parametrize("dtype", (np.float64, np.float32)) +def test_memory_layout(name, dtype): + # Test that it works no matter the memory layout + est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False) + + # Dense + for container, kwargs in ( + (np.asarray, {}), # Nothing + (np.asarray, {"order": "C"}), # C-order + (np.asarray, {"order": "F"}), # F-order + (np.ascontiguousarray, {}), # Contiguous + ): + X = container(iris.data, dtype=dtype, **kwargs) + y = iris.target + assert_array_almost_equal(est.fit(X, y).predict(X), y) + + # Sparse (if applicable) + if est.estimator.splitter in SPARSE_SPLITTERS: + for sparse_container in COO_CONTAINERS + CSC_CONTAINERS + CSR_CONTAINERS: + X = sparse_container(iris.data, dtype=dtype) + y = iris.target + assert_array_almost_equal(est.fit(X, y).predict(X), y) + + # Strided + X = np.asarray(iris.data[::3], dtype=dtype) + y = iris.target[::3] + assert_array_almost_equal(est.fit(X, y).predict(X), y) + + +@pytest.mark.parametrize("name", FOREST_ESTIMATORS) +def test_1d_input(name): + X = iris.data[:, 0] + X_2d = iris.data[:, 0].reshape((-1, 1)) + y = iris.target + + with ignore_warnings(): + ForestEstimator = FOREST_ESTIMATORS[name] + with pytest.raises(ValueError): + ForestEstimator(n_estimators=1, random_state=0).fit(X, y) + + est = ForestEstimator(random_state=0) + est.fit(X_2d, y) + + if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS: + with pytest.raises(ValueError): + est.predict(X) + + +@pytest.mark.parametrize("name", FOREST_CLASSIFIERS) +def test_class_weights(name): + # Check class_weights resemble sample_weights behavior. 
+ ForestClassifier = FOREST_CLASSIFIERS[name] + + # Iris is balanced, so no effect expected for using 'balanced' weights + clf1 = ForestClassifier(random_state=0) + clf1.fit(iris.data, iris.target) + clf2 = ForestClassifier(class_weight="balanced", random_state=0) + clf2.fit(iris.data, iris.target) + assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_) + + # Make a multi-output problem with three copies of Iris + iris_multi = np.vstack((iris.target, iris.target, iris.target)).T + # Create user-defined weights that should balance over the outputs + clf3 = ForestClassifier( + class_weight=[ + {0: 2.0, 1: 2.0, 2: 1.0}, + {0: 2.0, 1: 1.0, 2: 2.0}, + {0: 1.0, 1: 2.0, 2: 2.0}, + ], + random_state=0, + ) + clf3.fit(iris.data, iris_multi) + assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_) + # Check against multi-output "balanced" which should also have no effect + clf4 = ForestClassifier(class_weight="balanced", random_state=0) + clf4.fit(iris.data, iris_multi) + assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_) + + # Inflate importance of class 1, check against user-defined weights + sample_weight = np.ones(iris.target.shape) + sample_weight[iris.target == 1] *= 100 + class_weight = {0: 1.0, 1: 100.0, 2: 1.0} + clf1 = ForestClassifier(random_state=0) + clf1.fit(iris.data, iris.target, sample_weight) + clf2 = ForestClassifier(class_weight=class_weight, random_state=0) + clf2.fit(iris.data, iris.target) + assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_) + + # Check that sample_weight and class_weight are multiplicative + clf1 = ForestClassifier(random_state=0) + clf1.fit(iris.data, iris.target, sample_weight**2) + clf2 = ForestClassifier(class_weight=class_weight, random_state=0) + clf2.fit(iris.data, iris.target, sample_weight) + assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_) + + +@pytest.mark.parametrize("name", FOREST_CLASSIFIERS) +def test_class_weight_balanced_and_bootstrap_multi_output(name): + # Test class_weight works for multi-output""" + ForestClassifier = FOREST_CLASSIFIERS[name] + _y = np.vstack((y, np.array(y) * 2)).T + clf = ForestClassifier(class_weight="balanced", random_state=0) + clf.fit(X, _y) + clf = ForestClassifier( + class_weight=[{-1: 0.5, 1: 1.0}, {-2: 1.0, 2: 1.0}], random_state=0 + ) + clf.fit(X, _y) + # smoke test for balanced subsample + clf = ForestClassifier(class_weight="balanced_subsample", random_state=0) + clf.fit(X, _y) + + +@pytest.mark.parametrize("name", FOREST_CLASSIFIERS) +def test_class_weight_errors(name): + # Test if class_weight raises errors and warnings when expected. + ForestClassifier = FOREST_CLASSIFIERS[name] + _y = np.vstack((y, np.array(y) * 2)).T + + # Warning warm_start with preset + clf = ForestClassifier(class_weight="balanced", warm_start=True, random_state=0) + clf.fit(X, y) + + warn_msg = ( + "Warm-start fitting without increasing n_estimators does not fit new trees." + ) + with pytest.warns(UserWarning, match=warn_msg): + clf.fit(X, _y) + + # Incorrect length list for multi-output + clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.0}], random_state=0) + with pytest.raises(ValueError): + clf.fit(X, _y) + + +@pytest.mark.parametrize("name", FOREST_ESTIMATORS) +def test_warm_start(name): + # Test if fitting incrementally with warm start gives a forest of the + # right size and the same results as a normal fit. 
+ X, y = hastie_X, hastie_y + ForestEstimator = FOREST_ESTIMATORS[name] + est_ws = None + for n_estimators in [5, 10]: + if est_ws is None: + est_ws = ForestEstimator( + n_estimators=n_estimators, random_state=42, warm_start=True + ) + else: + est_ws.set_params(n_estimators=n_estimators) + est_ws.fit(X, y) + assert len(est_ws) == n_estimators + + est_no_ws = ForestEstimator(n_estimators=10, random_state=42, warm_start=False) + est_no_ws.fit(X, y) + + assert set([tree.random_state for tree in est_ws]) == set( + [tree.random_state for tree in est_no_ws] + ) + + assert_array_equal( + est_ws.apply(X), est_no_ws.apply(X), err_msg="Failed with {0}".format(name) + ) + + +@pytest.mark.parametrize("name", FOREST_ESTIMATORS) +def test_warm_start_clear(name): + # Test if fit clears state and grows a new forest when warm_start==False. + X, y = hastie_X, hastie_y + ForestEstimator = FOREST_ESTIMATORS[name] + est = ForestEstimator(n_estimators=5, max_depth=1, warm_start=False, random_state=1) + est.fit(X, y) + + est_2 = ForestEstimator( + n_estimators=5, max_depth=1, warm_start=True, random_state=2 + ) + est_2.fit(X, y) # inits state + est_2.set_params(warm_start=False, random_state=1) + est_2.fit(X, y) # clears old state and equals est + + assert_array_almost_equal(est_2.apply(X), est.apply(X)) + + +@pytest.mark.parametrize("name", FOREST_ESTIMATORS) +def test_warm_start_smaller_n_estimators(name): + # Test if warm start second fit with smaller n_estimators raises error. + X, y = hastie_X, hastie_y + ForestEstimator = FOREST_ESTIMATORS[name] + est = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True) + est.fit(X, y) + est.set_params(n_estimators=4) + with pytest.raises(ValueError): + est.fit(X, y) + + +@pytest.mark.parametrize("name", FOREST_ESTIMATORS) +def test_warm_start_equal_n_estimators(name): + # Test if warm start with equal n_estimators does nothing and returns the + # same forest and raises a warning. + X, y = hastie_X, hastie_y + ForestEstimator = FOREST_ESTIMATORS[name] + est = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True, random_state=1) + est.fit(X, y) + + est_2 = ForestEstimator( + n_estimators=5, max_depth=3, warm_start=True, random_state=1 + ) + est_2.fit(X, y) + # Now est_2 equals est. + + est_2.set_params(random_state=2) + warn_msg = ( + "Warm-start fitting without increasing n_estimators does not fit new trees." + ) + with pytest.warns(UserWarning, match=warn_msg): + est_2.fit(X, y) + # If we had fit the trees again we would have got a different forest as we + # changed the random state. + assert_array_equal(est.apply(X), est_2.apply(X)) + + +@pytest.mark.parametrize("name", FOREST_CLASSIFIERS_REGRESSORS) +def test_warm_start_oob(name): + # Test that the warm start computes oob score when asked. + X, y = hastie_X, hastie_y + ForestEstimator = FOREST_ESTIMATORS[name] + # Use 15 estimators to avoid 'some inputs do not have OOB scores' warning. + est = ForestEstimator( + n_estimators=15, + max_depth=3, + warm_start=False, + random_state=1, + bootstrap=True, + oob_score=True, + ) + est.fit(X, y) + + est_2 = ForestEstimator( + n_estimators=5, + max_depth=3, + warm_start=False, + random_state=1, + bootstrap=True, + oob_score=False, + ) + est_2.fit(X, y) + + est_2.set_params(warm_start=True, oob_score=True, n_estimators=15) + est_2.fit(X, y) + + assert hasattr(est_2, "oob_score_") + assert est.oob_score_ == est_2.oob_score_ + + # Test that oob_score is computed even if we don't need to train + # additional trees. 
+ est_3 = ForestEstimator( + n_estimators=15, + max_depth=3, + warm_start=True, + random_state=1, + bootstrap=True, + oob_score=False, + ) + est_3.fit(X, y) + assert not hasattr(est_3, "oob_score_") + + est_3.set_params(oob_score=True) + ignore_warnings(est_3.fit)(X, y) + + assert est.oob_score_ == est_3.oob_score_ + + +@pytest.mark.parametrize("name", FOREST_CLASSIFIERS_REGRESSORS) +def test_oob_not_computed_twice(name): + # Check that oob_score is not computed twice when warm_start=True. + X, y = hastie_X, hastie_y + ForestEstimator = FOREST_ESTIMATORS[name] + + est = ForestEstimator( + n_estimators=10, warm_start=True, bootstrap=True, oob_score=True + ) + + with patch.object( + est, "_set_oob_score_and_attributes", wraps=est._set_oob_score_and_attributes + ) as mock_set_oob_score_and_attributes: + est.fit(X, y) + + with pytest.warns(UserWarning, match="Warm-start fitting without increasing"): + est.fit(X, y) + + mock_set_oob_score_and_attributes.assert_called_once() + + +def test_dtype_convert(n_classes=15): + classifier = RandomForestClassifier(random_state=0, bootstrap=False) + + X = np.eye(n_classes) + y = [ch for ch in "ABCDEFGHIJKLMNOPQRSTU"[:n_classes]] + + result = classifier.fit(X, y).predict(X) + assert_array_equal(classifier.classes_, y) + assert_array_equal(result, y) + + +@pytest.mark.parametrize("name", FOREST_CLASSIFIERS_REGRESSORS) +def test_decision_path(name): + X, y = hastie_X, hastie_y + n_samples = X.shape[0] + ForestEstimator = FOREST_ESTIMATORS[name] + est = ForestEstimator(n_estimators=5, max_depth=1, warm_start=False, random_state=1) + est.fit(X, y) + indicator, n_nodes_ptr = est.decision_path(X) + + assert indicator.shape[1] == n_nodes_ptr[-1] + assert indicator.shape[0] == n_samples + assert_array_equal( + np.diff(n_nodes_ptr), [e.tree_.node_count for e in est.estimators_] + ) + + # Assert that leaves index are correct + leaves = est.apply(X) + for est_id in range(leaves.shape[1]): + leave_indicator = [ + indicator[i, n_nodes_ptr[est_id] + j] + for i, j in enumerate(leaves[:, est_id]) + ] + assert_array_almost_equal(leave_indicator, np.ones(shape=n_samples)) + + +def test_min_impurity_decrease(): + X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1) + all_estimators = [ + RandomForestClassifier, + RandomForestRegressor, + ExtraTreesClassifier, + ExtraTreesRegressor, + ] + + for Estimator in all_estimators: + est = Estimator(min_impurity_decrease=0.1) + est.fit(X, y) + for tree in est.estimators_: + # Simply check if the parameter is passed on correctly. Tree tests + # will suffice for the actual working of this param + assert tree.min_impurity_decrease == 0.1 + + +def test_poisson_y_positive_check(): + est = RandomForestRegressor(criterion="poisson") + X = np.zeros((3, 3)) + + y = [-1, 1, 3] + err_msg = ( + r"Some value\(s\) of y are negative which is " + r"not allowed for Poisson regression." + ) + with pytest.raises(ValueError, match=err_msg): + est.fit(X, y) + + y = [0, 0, 0] + err_msg = ( + r"Sum of y is not strictly positive which " + r"is necessary for Poisson regression." 
+ ) + with pytest.raises(ValueError, match=err_msg): + est.fit(X, y) + + +# mypy error: Variable "DEFAULT_JOBLIB_BACKEND" is not valid type +class MyBackend(DEFAULT_JOBLIB_BACKEND): # type: ignore + def __init__(self, *args, **kwargs): + self.count = 0 + super().__init__(*args, **kwargs) + + def start_call(self): + self.count += 1 + return super().start_call() + + +joblib.register_parallel_backend("testing", MyBackend) + + +@skip_if_no_parallel +def test_backend_respected(): + clf = RandomForestClassifier(n_estimators=10, n_jobs=2) + + with joblib.parallel_backend("testing") as (ba, n_jobs): + clf.fit(X, y) + + assert ba.count > 0 + + # predict_proba requires shared memory. Ensure that's honored. + with joblib.parallel_backend("testing") as (ba, _): + clf.predict_proba(X) + + assert ba.count == 0 + + +def test_forest_feature_importances_sum(): + X, y = make_classification( + n_samples=15, n_informative=3, random_state=1, n_classes=3 + ) + clf = RandomForestClassifier( + min_samples_leaf=5, random_state=42, n_estimators=200 + ).fit(X, y) + assert math.isclose(1, clf.feature_importances_.sum(), abs_tol=1e-7) + + +def test_forest_degenerate_feature_importances(): + # build a forest of single node trees. See #13636 + X = np.zeros((10, 10)) + y = np.ones((10,)) + gbr = RandomForestRegressor(n_estimators=10).fit(X, y) + assert_array_equal(gbr.feature_importances_, np.zeros(10, dtype=np.float64)) + + +@pytest.mark.parametrize("name", FOREST_CLASSIFIERS_REGRESSORS) +def test_max_samples_bootstrap(name): + # Check invalid `max_samples` values + est = FOREST_CLASSIFIERS_REGRESSORS[name](bootstrap=False, max_samples=0.5) + err_msg = ( + r"`max_sample` cannot be set if `bootstrap=False`. " + r"Either switch to `bootstrap=True` or set " + r"`max_sample=None`." + ) + with pytest.raises(ValueError, match=err_msg): + est.fit(X, y) + + +@pytest.mark.parametrize("name", FOREST_CLASSIFIERS_REGRESSORS) +def test_large_max_samples_exception(name): + # Check invalid `max_samples` + est = FOREST_CLASSIFIERS_REGRESSORS[name](bootstrap=True, max_samples=int(1e9)) + match = "`max_samples` must be <= n_samples=6 but got value 1000000000" + with pytest.raises(ValueError, match=match): + est.fit(X, y) + + +@pytest.mark.parametrize("name", FOREST_REGRESSORS) +def test_max_samples_boundary_regressors(name): + X_train, X_test, y_train, y_test = train_test_split( + X_reg, y_reg, train_size=0.7, test_size=0.3, random_state=0 + ) + + ms_1_model = FOREST_REGRESSORS[name]( + bootstrap=True, max_samples=1.0, random_state=0 + ) + ms_1_predict = ms_1_model.fit(X_train, y_train).predict(X_test) + + ms_None_model = FOREST_REGRESSORS[name]( + bootstrap=True, max_samples=None, random_state=0 + ) + ms_None_predict = ms_None_model.fit(X_train, y_train).predict(X_test) + + ms_1_ms = mean_squared_error(ms_1_predict, y_test) + ms_None_ms = mean_squared_error(ms_None_predict, y_test) + + assert ms_1_ms == pytest.approx(ms_None_ms) + + +@pytest.mark.parametrize("name", FOREST_CLASSIFIERS) +def test_max_samples_boundary_classifiers(name): + X_train, X_test, y_train, _ = train_test_split( + X_large, y_large, random_state=0, stratify=y_large + ) + + ms_1_model = FOREST_CLASSIFIERS[name]( + bootstrap=True, max_samples=1.0, random_state=0 + ) + ms_1_proba = ms_1_model.fit(X_train, y_train).predict_proba(X_test) + + ms_None_model = FOREST_CLASSIFIERS[name]( + bootstrap=True, max_samples=None, random_state=0 + ) + ms_None_proba = ms_None_model.fit(X_train, y_train).predict_proba(X_test) + + np.testing.assert_allclose(ms_1_proba, ms_None_proba) + 
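+# The `max_samples` checks above and below all reduce to the per-tree +# bootstrap size derived by the private helper `_get_n_samples_bootstrap` +# (already used in test_forest_multioutput_integral_regression_target). An +# illustrative sketch, assuming the helper keeps the behaviour these tests +# rely on rather than documenting its exact implementation: +# _get_n_samples_bootstrap(n_samples=100, max_samples=None) -> 100 (same fit as max_samples=1.0) +# _get_n_samples_bootstrap(n_samples=100, max_samples=0.5) -> 50 (floats are fractions of n_samples) +# _get_n_samples_bootstrap(n_samples=100, max_samples=2) -> 2 (ints are absolute counts) +# _get_n_samples_bootstrap(n_samples=100, max_samples=1e-4) -> 1 (rounded up to one sample, gh-24037) +# _get_n_samples_bootstrap(n_samples=100, max_samples=200) -> error, must be <= n_samples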
+ +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_forest_y_sparse(csr_container): + X = [[1, 2, 3]] + y = csr_container([[4, 5, 6]]) + est = RandomForestClassifier() + msg = "sparse multilabel-indicator for y is not supported." + with pytest.raises(ValueError, match=msg): + est.fit(X, y) + + +@pytest.mark.parametrize("ForestClass", [RandomForestClassifier, RandomForestRegressor]) +def test_little_tree_with_small_max_samples(ForestClass): + rng = np.random.RandomState(1) + + X = rng.randn(10000, 2) + y = rng.randn(10000) > 0 + + # First fit with no restriction on max samples + est1 = ForestClass( + n_estimators=1, + random_state=rng, + max_samples=None, + ) + + # Second fit with max samples restricted to just 2 + est2 = ForestClass( + n_estimators=1, + random_state=rng, + max_samples=2, + ) + + est1.fit(X, y) + est2.fit(X, y) + + tree1 = est1.estimators_[0].tree_ + tree2 = est2.estimators_[0].tree_ + + msg = "Tree without `max_samples` restriction should have more nodes" + assert tree1.node_count > tree2.node_count, msg + + +@pytest.mark.parametrize("Forest", FOREST_REGRESSORS) +def test_mse_criterion_object_segfault_smoke_test(Forest): + # This is a smoke test to ensure that passing a mutable criterion + # does not cause a segfault when fitting with concurrent threads. + # Non-regression test for: + # https://github.com/scikit-learn/scikit-learn/issues/12623 + from sklearn.tree._criterion import MSE + + y = y_reg.reshape(-1, 1) + n_samples, n_outputs = y.shape + mse_criterion = MSE(n_outputs, n_samples) + est = FOREST_REGRESSORS[Forest](n_estimators=2, n_jobs=2, criterion=mse_criterion) + + est.fit(X_reg, y) + + +def test_random_trees_embedding_feature_names_out(): + """Check feature names out for Random Trees Embedding.""" + random_state = np.random.RandomState(0) + X = np.abs(random_state.randn(100, 4)) + hasher = RandomTreesEmbedding( + n_estimators=2, max_depth=2, sparse_output=False, random_state=0 + ).fit(X) + names = hasher.get_feature_names_out() + expected_names = [ + f"randomtreesembedding_{tree}_{leaf}" + # Note: nodes with indices 0, 1 and 4 are internal split nodes and + # therefore do not appear in the expected output feature names. + for tree, leaf in [ + (0, 2), + (0, 3), + (0, 5), + (0, 6), + (1, 2), + (1, 3), + (1, 5), + (1, 6), + ] + ] + assert_array_equal(expected_names, names) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_read_only_buffer(csr_container, monkeypatch): + """RandomForestClassifier must work on readonly sparse data. + + Non-regression test for: https://github.com/scikit-learn/scikit-learn/issues/25333 + """ + monkeypatch.setattr( + sklearn.ensemble._forest, + "Parallel", + partial(Parallel, max_nbytes=100), + ) + rng = np.random.RandomState(seed=0) + + X, y = make_classification(n_samples=100, n_features=200, random_state=rng) + X = csr_container(X, copy=True) + + clf = RandomForestClassifier(n_jobs=2, random_state=rng) + cross_val_score(clf, X, y, cv=2) + + +@pytest.mark.parametrize("class_weight", ["balanced_subsample", None]) +def test_round_samples_to_one_when_samples_too_low(class_weight): + """Check low max_samples works and is rounded to one. + + Non-regression test for gh-24037. 
+ """ + X, y = datasets.load_wine(return_X_y=True) + forest = RandomForestClassifier( + n_estimators=10, max_samples=1e-4, class_weight=class_weight, random_state=0 + ) + forest.fit(X, y) + + +@pytest.mark.parametrize("seed", [None, 1]) +@pytest.mark.parametrize("bootstrap", [True, False]) +@pytest.mark.parametrize("ForestClass", FOREST_CLASSIFIERS_REGRESSORS.values()) +def test_estimators_samples(ForestClass, bootstrap, seed): + """Estimators_samples_ property should be consistent. + + Tests consistency across fits and whether or not the seed for the random generator + is set. + """ + X, y = make_hastie_10_2(n_samples=200, random_state=1) + + if bootstrap: + max_samples = 0.5 + else: + max_samples = None + est = ForestClass( + n_estimators=10, + max_samples=max_samples, + max_features=0.5, + random_state=seed, + bootstrap=bootstrap, + ) + est.fit(X, y) + + estimators_samples = est.estimators_samples_.copy() + + # Test repeated calls result in same set of indices + assert_array_equal(estimators_samples, est.estimators_samples_) + estimators = est.estimators_ + + assert isinstance(estimators_samples, list) + assert len(estimators_samples) == len(estimators) + assert estimators_samples[0].dtype == np.int32 + + for i in range(len(estimators)): + if bootstrap: + assert len(estimators_samples[i]) == len(X) // 2 + + # the bootstrap should be a resampling with replacement + assert len(np.unique(estimators_samples[i])) < len(estimators_samples[i]) + else: + assert len(set(estimators_samples[i])) == len(X) + + estimator_index = 0 + estimator_samples = estimators_samples[estimator_index] + estimator = estimators[estimator_index] + + X_train = X[estimator_samples] + y_train = y[estimator_samples] + + orig_tree_values = estimator.tree_.value + estimator = clone(estimator) + estimator.fit(X_train, y_train) + new_tree_values = estimator.tree_.value + assert_allclose(orig_tree_values, new_tree_values) + + +@pytest.mark.parametrize( + "make_data, Forest", + [ + (datasets.make_regression, RandomForestRegressor), + (datasets.make_classification, RandomForestClassifier), + ], +) +def test_missing_values_is_resilient(make_data, Forest): + """Check that forest can deal with missing values and has decent performance.""" + + rng = np.random.RandomState(0) + n_samples, n_features = 1000, 10 + X, y = make_data(n_samples=n_samples, n_features=n_features, random_state=rng) + + # Create dataset with missing values + X_missing = X.copy() + X_missing[rng.choice([False, True], size=X.shape, p=[0.95, 0.05])] = np.nan + assert np.isnan(X_missing).any() + + X_missing_train, X_missing_test, y_train, y_test = train_test_split( + X_missing, y, random_state=0 + ) + + # Train forest with missing values + forest_with_missing = Forest(random_state=rng, n_estimators=50) + forest_with_missing.fit(X_missing_train, y_train) + score_with_missing = forest_with_missing.score(X_missing_test, y_test) + + # Train forest without missing values + X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) + forest = Forest(random_state=rng, n_estimators=50) + forest.fit(X_train, y_train) + score_without_missing = forest.score(X_test, y_test) + + # Score is still 80 percent of the forest's score that had no missing values + assert score_with_missing >= 0.80 * score_without_missing + + +@pytest.mark.parametrize("Forest", [RandomForestClassifier, RandomForestRegressor]) +def test_missing_value_is_predictive(Forest): + """Check that the forest learns when missing values are only present for + a predictive feature.""" + rng = 
np.random.RandomState(0) + n_samples = 300 + + X_non_predictive = rng.standard_normal(size=(n_samples, 10)) + y = rng.randint(0, high=2, size=n_samples) + + # Create a predictive feature using `y` and with some noise + X_random_mask = rng.choice([False, True], size=n_samples, p=[0.95, 0.05]) + y_mask = y.astype(bool) + y_mask[X_random_mask] = ~y_mask[X_random_mask] + + predictive_feature = rng.standard_normal(size=n_samples) + predictive_feature[y_mask] = np.nan + assert np.isnan(predictive_feature).any() + + X_predictive = X_non_predictive.copy() + X_predictive[:, 5] = predictive_feature + + ( + X_predictive_train, + X_predictive_test, + X_non_predictive_train, + X_non_predictive_test, + y_train, + y_test, + ) = train_test_split(X_predictive, X_non_predictive, y, random_state=0) + forest_predictive = Forest(random_state=0).fit(X_predictive_train, y_train) + forest_non_predictive = Forest(random_state=0).fit(X_non_predictive_train, y_train) + + predictive_test_score = forest_predictive.score(X_predictive_test, y_test) + + assert predictive_test_score >= 0.75 + assert predictive_test_score >= forest_non_predictive.score( + X_non_predictive_test, y_test + ) + + +def test_non_supported_criterion_raises_error_with_missing_values(): + """Raise error for unsupported criterion when there are missing values.""" + X = np.array([[0, 1, 2], [np.nan, 0, 2.0]]) + y = [0.5, 1.0] + + forest = RandomForestRegressor(criterion="absolute_error") + + msg = "RandomForestRegressor does not accept missing values" + with pytest.raises(ValueError, match=msg): + forest.fit(X, y) diff --git a/venv/lib/python3.10/site-packages/sklearn/ensemble/tests/test_gradient_boosting.py b/venv/lib/python3.10/site-packages/sklearn/ensemble/tests/test_gradient_boosting.py new file mode 100644 index 0000000000000000000000000000000000000000..4bfbf7c2ff6ee1a0c3c3fe7c7733691964f58429 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/ensemble/tests/test_gradient_boosting.py @@ -0,0 +1,1710 @@ +""" +Testing for the gradient boosting module (sklearn.ensemble.gradient_boosting). 
+""" +import re +import warnings + +import numpy as np +import pytest +from numpy.testing import assert_allclose + +from sklearn import datasets +from sklearn.base import clone +from sklearn.datasets import make_classification, make_regression +from sklearn.dummy import DummyClassifier, DummyRegressor +from sklearn.ensemble import GradientBoostingClassifier, GradientBoostingRegressor +from sklearn.ensemble._gb import _safe_divide +from sklearn.ensemble._gradient_boosting import predict_stages +from sklearn.exceptions import DataConversionWarning, NotFittedError +from sklearn.linear_model import LinearRegression +from sklearn.metrics import mean_squared_error +from sklearn.model_selection import train_test_split +from sklearn.pipeline import make_pipeline +from sklearn.preprocessing import scale +from sklearn.svm import NuSVR +from sklearn.utils import check_random_state, tosequence +from sklearn.utils._mocking import NoSampleWeightWrapper +from sklearn.utils._param_validation import InvalidParameterError +from sklearn.utils._testing import ( + assert_array_almost_equal, + assert_array_equal, + skip_if_32bit, +) +from sklearn.utils.fixes import COO_CONTAINERS, CSC_CONTAINERS, CSR_CONTAINERS + +GRADIENT_BOOSTING_ESTIMATORS = [GradientBoostingClassifier, GradientBoostingRegressor] + +# toy sample +X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]] +y = [-1, -1, -1, 1, 1, 1] +T = [[-1, -1], [2, 2], [3, 2]] +true_result = [-1, 1, 1] + +# also make regression dataset +X_reg, y_reg = make_regression( + n_samples=100, n_features=4, n_informative=8, noise=10, random_state=7 +) +y_reg = scale(y_reg) + +rng = np.random.RandomState(0) +# also load the iris dataset +# and randomly permute it +iris = datasets.load_iris() +perm = rng.permutation(iris.target.size) +iris.data = iris.data[perm] +iris.target = iris.target[perm] + + +def test_exponential_n_classes_gt_2(): + """Test exponential loss raises for n_classes > 2.""" + clf = GradientBoostingClassifier(loss="exponential") + msg = "loss='exponential' is only suitable for a binary classification" + with pytest.raises(ValueError, match=msg): + clf.fit(iris.data, iris.target) + + +def test_raise_if_init_has_no_predict_proba(): + """Test raise if init_ has no predict_proba method.""" + clf = GradientBoostingClassifier(init=GradientBoostingRegressor) + msg = ( + "The 'init' parameter of GradientBoostingClassifier must be a str among " + "{'zero'}, None or an object implementing 'fit' and 'predict_proba'." + ) + with pytest.raises(ValueError, match=msg): + clf.fit(X, y) + + +@pytest.mark.parametrize("loss", ("log_loss", "exponential")) +def test_classification_toy(loss, global_random_seed): + # Check classification on a toy dataset. + clf = GradientBoostingClassifier( + loss=loss, n_estimators=10, random_state=global_random_seed + ) + + with pytest.raises(ValueError): + clf.predict(T) + + clf.fit(X, y) + assert_array_equal(clf.predict(T), true_result) + assert 10 == len(clf.estimators_) + + log_loss_decrease = clf.train_score_[:-1] - clf.train_score_[1:] + assert np.any(log_loss_decrease >= 0.0) + + leaves = clf.apply(X) + assert leaves.shape == (6, 10, 1) + + +@pytest.mark.parametrize("loss", ("log_loss", "exponential")) +def test_classification_synthetic(loss, global_random_seed): + # Test GradientBoostingClassifier on synthetic dataset used by + # Hastie et al. in ESLII - Figure 10.9 + # Note that Figure 10.9 reuses the dataset generated for figure 10.2 + # and should have 2_000 train data points and 10_000 test data points. 
+ # Here we intentionally use a smaller variant to make the test run faster, + # but the conclusions are still the same, despite the smaller datasets. + X, y = datasets.make_hastie_10_2(n_samples=2000, random_state=global_random_seed) + + split_idx = 500 + X_train, X_test = X[:split_idx], X[split_idx:] + y_train, y_test = y[:split_idx], y[split_idx:] + + # Increasing the number of trees should decrease the test error + common_params = { + "max_depth": 1, + "learning_rate": 1.0, + "loss": loss, + "random_state": global_random_seed, + } + gbrt_10_stumps = GradientBoostingClassifier(n_estimators=10, **common_params) + gbrt_10_stumps.fit(X_train, y_train) + + gbrt_50_stumps = GradientBoostingClassifier(n_estimators=50, **common_params) + gbrt_50_stumps.fit(X_train, y_train) + + assert gbrt_10_stumps.score(X_test, y_test) < gbrt_50_stumps.score(X_test, y_test) + + # Decision stumps are better suited for this dataset with a large number of + # estimators. + common_params = { + "n_estimators": 200, + "learning_rate": 1.0, + "loss": loss, + "random_state": global_random_seed, + } + gbrt_stumps = GradientBoostingClassifier(max_depth=1, **common_params) + gbrt_stumps.fit(X_train, y_train) + + gbrt_10_nodes = GradientBoostingClassifier(max_leaf_nodes=10, **common_params) + gbrt_10_nodes.fit(X_train, y_train) + + assert gbrt_stumps.score(X_test, y_test) > gbrt_10_nodes.score(X_test, y_test) + + +@pytest.mark.parametrize("loss", ("squared_error", "absolute_error", "huber")) +@pytest.mark.parametrize("subsample", (1.0, 0.5)) +def test_regression_dataset(loss, subsample, global_random_seed): + # Check consistency on regression dataset with least squares + # and least absolute deviation. + ones = np.ones(len(y_reg)) + last_y_pred = None + for sample_weight in [None, ones, 2 * ones]: + # learning_rate, max_depth and n_estimators were adjusted to get a mode + # that is accurate enough to reach a low MSE on the training set while + # keeping the resource used to execute this test low enough. + reg = GradientBoostingRegressor( + n_estimators=30, + loss=loss, + max_depth=4, + subsample=subsample, + min_samples_split=2, + random_state=global_random_seed, + learning_rate=0.5, + ) + + reg.fit(X_reg, y_reg, sample_weight=sample_weight) + leaves = reg.apply(X_reg) + assert leaves.shape == (100, 30) + + y_pred = reg.predict(X_reg) + mse = mean_squared_error(y_reg, y_pred) + assert mse < 0.05 + + if last_y_pred is not None: + # FIXME: We temporarily bypass this test. This is due to the fact + # that GBRT with and without `sample_weight` do not use the same + # implementation of the median during the initialization with the + # `DummyRegressor`. In the future, we should make sure that both + # implementations should be the same. See PR #17377 for more. + # assert_allclose(last_y_pred, y_pred) + pass + + last_y_pred = y_pred + + +@pytest.mark.parametrize("subsample", (1.0, 0.5)) +@pytest.mark.parametrize("sample_weight", (None, 1)) +def test_iris(subsample, sample_weight, global_random_seed): + if sample_weight == 1: + sample_weight = np.ones(len(iris.target)) + # Check consistency on dataset iris. 
+ clf = GradientBoostingClassifier( + n_estimators=100, + loss="log_loss", + random_state=global_random_seed, + subsample=subsample, + ) + clf.fit(iris.data, iris.target, sample_weight=sample_weight) + score = clf.score(iris.data, iris.target) + assert score > 0.9 + + leaves = clf.apply(iris.data) + assert leaves.shape == (150, 100, 3) + + +def test_regression_synthetic(global_random_seed): + # Test on synthetic regression datasets used in Leo Breiman, + # "Bagging Predictors". Machine Learning 24(2): 123-140 (1996). + random_state = check_random_state(global_random_seed) + regression_params = { + "n_estimators": 100, + "max_depth": 4, + "min_samples_split": 2, + "learning_rate": 0.1, + "loss": "squared_error", + "random_state": global_random_seed, + } + + # Friedman1 + X, y = datasets.make_friedman1(n_samples=1200, random_state=random_state, noise=1.0) + X_train, y_train = X[:200], y[:200] + X_test, y_test = X[200:], y[200:] + + clf = GradientBoostingRegressor(**regression_params) + clf.fit(X_train, y_train) + mse = mean_squared_error(y_test, clf.predict(X_test)) + assert mse < 6.5 + + # Friedman2 + X, y = datasets.make_friedman2(n_samples=1200, random_state=random_state) + X_train, y_train = X[:200], y[:200] + X_test, y_test = X[200:], y[200:] + + clf = GradientBoostingRegressor(**regression_params) + clf.fit(X_train, y_train) + mse = mean_squared_error(y_test, clf.predict(X_test)) + assert mse < 2500.0 + + # Friedman3 + X, y = datasets.make_friedman3(n_samples=1200, random_state=random_state) + X_train, y_train = X[:200], y[:200] + X_test, y_test = X[200:], y[200:] + + clf = GradientBoostingRegressor(**regression_params) + clf.fit(X_train, y_train) + mse = mean_squared_error(y_test, clf.predict(X_test)) + assert mse < 0.025 + + +@pytest.mark.parametrize( + "GradientBoosting, X, y", + [ + (GradientBoostingRegressor, X_reg, y_reg), + (GradientBoostingClassifier, iris.data, iris.target), + ], +) +def test_feature_importances(GradientBoosting, X, y): + # smoke test to check that the gradient boosting estimator exposes the + # feature_importances_ attribute + gbdt = GradientBoosting() + assert not hasattr(gbdt, "feature_importances_") + gbdt.fit(X, y) + assert hasattr(gbdt, "feature_importances_") + + +def test_probability_log(global_random_seed): + # Predict probabilities. + clf = GradientBoostingClassifier(n_estimators=100, random_state=global_random_seed) + + with pytest.raises(ValueError): + clf.predict_proba(T) + + clf.fit(X, y) + assert_array_equal(clf.predict(T), true_result) + + # check if probabilities are in [0, 1]. + y_proba = clf.predict_proba(T) + assert np.all(y_proba >= 0.0) + assert np.all(y_proba <= 1.0) + + # derive predictions from probabilities + y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0) + assert_array_equal(y_pred, true_result) + + +def test_single_class_with_sample_weight(): + sample_weight = [0, 0, 0, 1, 1, 1] + clf = GradientBoostingClassifier(n_estimators=100, random_state=1) + msg = ( + "y contains 1 class after sample_weight trimmed classes with " + "zero weights, while a minimum of 2 classes are required." 
+ ) + with pytest.raises(ValueError, match=msg): + clf.fit(X, y, sample_weight=sample_weight) + + +@pytest.mark.parametrize("csc_container", CSC_CONTAINERS) +def test_check_inputs_predict_stages(csc_container): + # check that predict_stages through an error if the type of X is not + # supported + x, y = datasets.make_hastie_10_2(n_samples=100, random_state=1) + x_sparse_csc = csc_container(x) + clf = GradientBoostingClassifier(n_estimators=100, random_state=1) + clf.fit(x, y) + score = np.zeros((y.shape)).reshape(-1, 1) + err_msg = "When X is a sparse matrix, a CSR format is expected" + with pytest.raises(ValueError, match=err_msg): + predict_stages(clf.estimators_, x_sparse_csc, clf.learning_rate, score) + x_fortran = np.asfortranarray(x) + with pytest.raises(ValueError, match="X should be C-ordered np.ndarray"): + predict_stages(clf.estimators_, x_fortran, clf.learning_rate, score) + + +def test_max_feature_regression(global_random_seed): + # Test to make sure random state is set properly. + X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=global_random_seed) + + X_train, X_test = X[:2000], X[2000:] + y_train, y_test = y[:2000], y[2000:] + + gbrt = GradientBoostingClassifier( + n_estimators=100, + min_samples_split=5, + max_depth=2, + learning_rate=0.1, + max_features=2, + random_state=global_random_seed, + ) + gbrt.fit(X_train, y_train) + log_loss = gbrt._loss(y_test, gbrt.decision_function(X_test)) + assert log_loss < 0.5, "GB failed with deviance %.4f" % log_loss + + +def test_feature_importance_regression( + fetch_california_housing_fxt, global_random_seed +): + """Test that Gini importance is calculated correctly. + + This test follows the example from [1]_ (pg. 373). + + .. [1] Friedman, J., Hastie, T., & Tibshirani, R. (2001). The elements + of statistical learning. New York: Springer series in statistics. + """ + california = fetch_california_housing_fxt() + X, y = california.data, california.target + X_train, X_test, y_train, y_test = train_test_split( + X, y, random_state=global_random_seed + ) + + reg = GradientBoostingRegressor( + loss="huber", + learning_rate=0.1, + max_leaf_nodes=6, + n_estimators=100, + random_state=global_random_seed, + ) + reg.fit(X_train, y_train) + sorted_idx = np.argsort(reg.feature_importances_)[::-1] + sorted_features = [california.feature_names[s] for s in sorted_idx] + + # The most important feature is the median income by far. + assert sorted_features[0] == "MedInc" + + # The three subsequent features are the following. Their relative ordering + # might change a bit depending on the randomness of the trees and the + # train / test split. + assert set(sorted_features[1:4]) == {"Longitude", "AveOccup", "Latitude"} + + +def test_max_features(): + # Test if max features is set properly for floats and str. 
+ X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1) + _, n_features = X.shape + + X_train = X[:2000] + y_train = y[:2000] + + gbrt = GradientBoostingClassifier(n_estimators=1, max_features=None) + gbrt.fit(X_train, y_train) + assert gbrt.max_features_ == n_features + + gbrt = GradientBoostingRegressor(n_estimators=1, max_features=None) + gbrt.fit(X_train, y_train) + assert gbrt.max_features_ == n_features + + gbrt = GradientBoostingRegressor(n_estimators=1, max_features=0.3) + gbrt.fit(X_train, y_train) + assert gbrt.max_features_ == int(n_features * 0.3) + + gbrt = GradientBoostingRegressor(n_estimators=1, max_features="sqrt") + gbrt.fit(X_train, y_train) + assert gbrt.max_features_ == int(np.sqrt(n_features)) + + gbrt = GradientBoostingRegressor(n_estimators=1, max_features="log2") + gbrt.fit(X_train, y_train) + assert gbrt.max_features_ == int(np.log2(n_features)) + + gbrt = GradientBoostingRegressor(n_estimators=1, max_features=0.01 / X.shape[1]) + gbrt.fit(X_train, y_train) + assert gbrt.max_features_ == 1 + + +def test_staged_predict(): + # Test whether staged decision function eventually gives + # the same prediction. + X, y = datasets.make_friedman1(n_samples=1200, random_state=1, noise=1.0) + X_train, y_train = X[:200], y[:200] + X_test = X[200:] + clf = GradientBoostingRegressor() + # test raise ValueError if not fitted + with pytest.raises(ValueError): + np.fromiter(clf.staged_predict(X_test), dtype=np.float64) + + clf.fit(X_train, y_train) + y_pred = clf.predict(X_test) + + # test if prediction for last stage equals ``predict`` + for y in clf.staged_predict(X_test): + assert y.shape == y_pred.shape + + assert_array_almost_equal(y_pred, y) + + +def test_staged_predict_proba(): + # Test whether staged predict proba eventually gives + # the same prediction. + X, y = datasets.make_hastie_10_2(n_samples=1200, random_state=1) + X_train, y_train = X[:200], y[:200] + X_test, y_test = X[200:], y[200:] + clf = GradientBoostingClassifier(n_estimators=20) + # test raise NotFittedError if not + with pytest.raises(NotFittedError): + np.fromiter(clf.staged_predict_proba(X_test), dtype=np.float64) + + clf.fit(X_train, y_train) + + # test if prediction for last stage equals ``predict`` + for y_pred in clf.staged_predict(X_test): + assert y_test.shape == y_pred.shape + + assert_array_equal(clf.predict(X_test), y_pred) + + # test if prediction for last stage equals ``predict_proba`` + for staged_proba in clf.staged_predict_proba(X_test): + assert y_test.shape[0] == staged_proba.shape[0] + assert 2 == staged_proba.shape[1] + + assert_array_almost_equal(clf.predict_proba(X_test), staged_proba) + + +@pytest.mark.parametrize("Estimator", GRADIENT_BOOSTING_ESTIMATORS) +def test_staged_functions_defensive(Estimator, global_random_seed): + # test that staged_functions make defensive copies + rng = np.random.RandomState(global_random_seed) + X = rng.uniform(size=(10, 3)) + y = (4 * X[:, 0]).astype(int) + 1 # don't predict zeros + estimator = Estimator() + estimator.fit(X, y) + for func in ["predict", "decision_function", "predict_proba"]: + staged_func = getattr(estimator, "staged_" + func, None) + if staged_func is None: + # regressor has no staged_predict_proba + continue + with warnings.catch_warnings(record=True): + staged_result = list(staged_func(X)) + staged_result[1][:] = 0 + assert np.all(staged_result[0] != 0) + + +def test_serialization(): + # Check model serialization. 
+ clf = GradientBoostingClassifier(n_estimators=100, random_state=1) + + clf.fit(X, y) + assert_array_equal(clf.predict(T), true_result) + assert 100 == len(clf.estimators_) + + try: + import cPickle as pickle + except ImportError: + import pickle + + serialized_clf = pickle.dumps(clf, protocol=pickle.HIGHEST_PROTOCOL) + clf = None + clf = pickle.loads(serialized_clf) + assert_array_equal(clf.predict(T), true_result) + assert 100 == len(clf.estimators_) + + +def test_degenerate_targets(): + # Check if we can fit even though all targets are equal. + clf = GradientBoostingClassifier(n_estimators=100, random_state=1) + + # classifier should raise exception + with pytest.raises(ValueError): + clf.fit(X, np.ones(len(X))) + + clf = GradientBoostingRegressor(n_estimators=100, random_state=1) + clf.fit(X, np.ones(len(X))) + clf.predict([rng.rand(2)]) + assert_array_equal(np.ones((1,), dtype=np.float64), clf.predict([rng.rand(2)])) + + +def test_quantile_loss(global_random_seed): + # Check if quantile loss with alpha=0.5 equals absolute_error. + clf_quantile = GradientBoostingRegressor( + n_estimators=100, + loss="quantile", + max_depth=4, + alpha=0.5, + random_state=global_random_seed, + ) + + clf_quantile.fit(X_reg, y_reg) + y_quantile = clf_quantile.predict(X_reg) + + clf_ae = GradientBoostingRegressor( + n_estimators=100, + loss="absolute_error", + max_depth=4, + random_state=global_random_seed, + ) + + clf_ae.fit(X_reg, y_reg) + y_ae = clf_ae.predict(X_reg) + assert_allclose(y_quantile, y_ae) + + +def test_symbol_labels(): + # Test with non-integer class labels. + clf = GradientBoostingClassifier(n_estimators=100, random_state=1) + + symbol_y = tosequence(map(str, y)) + + clf.fit(X, symbol_y) + assert_array_equal(clf.predict(T), tosequence(map(str, true_result))) + assert 100 == len(clf.estimators_) + + +def test_float_class_labels(): + # Test with float class labels. + clf = GradientBoostingClassifier(n_estimators=100, random_state=1) + + float_y = np.asarray(y, dtype=np.float32) + + clf.fit(X, float_y) + assert_array_equal(clf.predict(T), np.asarray(true_result, dtype=np.float32)) + assert 100 == len(clf.estimators_) + + +def test_shape_y(): + # Test with float class labels. + clf = GradientBoostingClassifier(n_estimators=100, random_state=1) + + y_ = np.asarray(y, dtype=np.int32) + y_ = y_[:, np.newaxis] + + # This will raise a DataConversionWarning that we want to + # "always" raise, elsewhere the warnings gets ignored in the + # later tests, and the tests that check for this warning fail + warn_msg = ( + "A column-vector y was passed when a 1d array was expected. " + "Please change the shape of y to \\(n_samples, \\), for " + "example using ravel()." 
+ ) + with pytest.warns(DataConversionWarning, match=warn_msg): + clf.fit(X, y_) + assert_array_equal(clf.predict(T), true_result) + assert 100 == len(clf.estimators_) + + +def test_mem_layout(): + # Test with different memory layouts of X and y + X_ = np.asfortranarray(X) + clf = GradientBoostingClassifier(n_estimators=100, random_state=1) + clf.fit(X_, y) + assert_array_equal(clf.predict(T), true_result) + assert 100 == len(clf.estimators_) + + X_ = np.ascontiguousarray(X) + clf = GradientBoostingClassifier(n_estimators=100, random_state=1) + clf.fit(X_, y) + assert_array_equal(clf.predict(T), true_result) + assert 100 == len(clf.estimators_) + + y_ = np.asarray(y, dtype=np.int32) + y_ = np.ascontiguousarray(y_) + clf = GradientBoostingClassifier(n_estimators=100, random_state=1) + clf.fit(X, y_) + assert_array_equal(clf.predict(T), true_result) + assert 100 == len(clf.estimators_) + + y_ = np.asarray(y, dtype=np.int32) + y_ = np.asfortranarray(y_) + clf = GradientBoostingClassifier(n_estimators=100, random_state=1) + clf.fit(X, y_) + assert_array_equal(clf.predict(T), true_result) + assert 100 == len(clf.estimators_) + + +@pytest.mark.parametrize("GradientBoostingEstimator", GRADIENT_BOOSTING_ESTIMATORS) +def test_oob_improvement(GradientBoostingEstimator): + # Test if oob improvement has correct shape and regression test. + estimator = GradientBoostingEstimator( + n_estimators=100, random_state=1, subsample=0.5 + ) + estimator.fit(X, y) + assert estimator.oob_improvement_.shape[0] == 100 + # hard-coded regression test - change if modification in OOB computation + assert_array_almost_equal( + estimator.oob_improvement_[:5], + np.array([0.19, 0.15, 0.12, -0.11, 0.11]), + decimal=2, + ) + + +@pytest.mark.parametrize("GradientBoostingEstimator", GRADIENT_BOOSTING_ESTIMATORS) +def test_oob_scores(GradientBoostingEstimator): + # Test if oob scores has correct shape and regression test. + X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1) + estimator = GradientBoostingEstimator( + n_estimators=100, random_state=1, subsample=0.5 + ) + estimator.fit(X, y) + assert estimator.oob_scores_.shape[0] == 100 + assert estimator.oob_scores_[-1] == pytest.approx(estimator.oob_score_) + + estimator = GradientBoostingEstimator( + n_estimators=100, + random_state=1, + subsample=0.5, + n_iter_no_change=5, + ) + estimator.fit(X, y) + assert estimator.oob_scores_.shape[0] < 100 + assert estimator.oob_scores_[-1] == pytest.approx(estimator.oob_score_) + + +@pytest.mark.parametrize( + "GradientBoostingEstimator, oob_attribute", + [ + (GradientBoostingClassifier, "oob_improvement_"), + (GradientBoostingClassifier, "oob_scores_"), + (GradientBoostingClassifier, "oob_score_"), + (GradientBoostingRegressor, "oob_improvement_"), + (GradientBoostingRegressor, "oob_scores_"), + (GradientBoostingRegressor, "oob_score_"), + ], +) +def test_oob_attributes_error(GradientBoostingEstimator, oob_attribute): + """ + Check that we raise an AttributeError when the OOB statistics were not computed. + """ + X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1) + estimator = GradientBoostingEstimator( + n_estimators=100, + random_state=1, + subsample=1.0, + ) + estimator.fit(X, y) + with pytest.raises(AttributeError): + estimator.oob_attribute + + +def test_oob_multilcass_iris(): + # Check OOB improvement on multi-class dataset. 
+ estimator = GradientBoostingClassifier( + n_estimators=100, loss="log_loss", random_state=1, subsample=0.5 + ) + estimator.fit(iris.data, iris.target) + score = estimator.score(iris.data, iris.target) + assert score > 0.9 + assert estimator.oob_improvement_.shape[0] == estimator.n_estimators + assert estimator.oob_scores_.shape[0] == estimator.n_estimators + assert estimator.oob_scores_[-1] == pytest.approx(estimator.oob_score_) + + estimator = GradientBoostingClassifier( + n_estimators=100, + loss="log_loss", + random_state=1, + subsample=0.5, + n_iter_no_change=5, + ) + estimator.fit(iris.data, iris.target) + score = estimator.score(iris.data, iris.target) + assert estimator.oob_improvement_.shape[0] < estimator.n_estimators + assert estimator.oob_scores_.shape[0] < estimator.n_estimators + assert estimator.oob_scores_[-1] == pytest.approx(estimator.oob_score_) + + # hard-coded regression test - change if modification in OOB computation + # FIXME: the following snippet does not yield the same results on 32 bits + # assert_array_almost_equal(estimator.oob_improvement_[:5], + # np.array([12.68, 10.45, 8.18, 6.43, 5.13]), + # decimal=2) + + +def test_verbose_output(): + # Check verbose=1 does not cause error. + import sys + from io import StringIO + + old_stdout = sys.stdout + sys.stdout = StringIO() + clf = GradientBoostingClassifier( + n_estimators=100, random_state=1, verbose=1, subsample=0.8 + ) + clf.fit(X, y) + verbose_output = sys.stdout + sys.stdout = old_stdout + + # check output + verbose_output.seek(0) + header = verbose_output.readline().rstrip() + # with OOB + true_header = " ".join(["%10s"] + ["%16s"] * 3) % ( + "Iter", + "Train Loss", + "OOB Improve", + "Remaining Time", + ) + assert true_header == header + + n_lines = sum(1 for l in verbose_output.readlines()) + # one for 1-10 and then 9 for 20-100 + assert 10 + 9 == n_lines + + +def test_more_verbose_output(): + # Check verbose=2 does not cause error. + import sys + from io import StringIO + + old_stdout = sys.stdout + sys.stdout = StringIO() + clf = GradientBoostingClassifier(n_estimators=100, random_state=1, verbose=2) + clf.fit(X, y) + verbose_output = sys.stdout + sys.stdout = old_stdout + + # check output + verbose_output.seek(0) + header = verbose_output.readline().rstrip() + # no OOB + true_header = " ".join(["%10s"] + ["%16s"] * 2) % ( + "Iter", + "Train Loss", + "Remaining Time", + ) + assert true_header == header + + n_lines = sum(1 for l in verbose_output.readlines()) + # 100 lines for n_estimators==100 + assert 100 == n_lines + + +@pytest.mark.parametrize("Cls", GRADIENT_BOOSTING_ESTIMATORS) +def test_warm_start(Cls, global_random_seed): + # Test if warm start equals fit. + X, y = datasets.make_hastie_10_2(n_samples=100, random_state=global_random_seed) + est = Cls(n_estimators=200, max_depth=1, random_state=global_random_seed) + est.fit(X, y) + + est_ws = Cls( + n_estimators=100, max_depth=1, warm_start=True, random_state=global_random_seed + ) + est_ws.fit(X, y) + est_ws.set_params(n_estimators=200) + est_ws.fit(X, y) + + if Cls is GradientBoostingRegressor: + assert_allclose(est_ws.predict(X), est.predict(X)) + else: + # Random state is preserved and hence predict_proba must also be + # same + assert_array_equal(est_ws.predict(X), est.predict(X)) + assert_allclose(est_ws.predict_proba(X), est.predict_proba(X)) + + +@pytest.mark.parametrize("Cls", GRADIENT_BOOSTING_ESTIMATORS) +def test_warm_start_n_estimators(Cls, global_random_seed): + # Test if warm start equals fit - set n_estimators. 
+ X, y = datasets.make_hastie_10_2(n_samples=100, random_state=global_random_seed) + est = Cls(n_estimators=300, max_depth=1, random_state=global_random_seed) + est.fit(X, y) + + est_ws = Cls( + n_estimators=100, max_depth=1, warm_start=True, random_state=global_random_seed + ) + est_ws.fit(X, y) + est_ws.set_params(n_estimators=300) + est_ws.fit(X, y) + + assert_allclose(est_ws.predict(X), est.predict(X)) + + +@pytest.mark.parametrize("Cls", GRADIENT_BOOSTING_ESTIMATORS) +def test_warm_start_max_depth(Cls): + # Test if possible to fit trees of different depth in ensemble. + X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1) + est = Cls(n_estimators=100, max_depth=1, warm_start=True) + est.fit(X, y) + est.set_params(n_estimators=110, max_depth=2) + est.fit(X, y) + + # last 10 trees have different depth + assert est.estimators_[0, 0].max_depth == 1 + for i in range(1, 11): + assert est.estimators_[-i, 0].max_depth == 2 + + +@pytest.mark.parametrize("Cls", GRADIENT_BOOSTING_ESTIMATORS) +def test_warm_start_clear(Cls): + # Test if fit clears state. + X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1) + est = Cls(n_estimators=100, max_depth=1) + est.fit(X, y) + + est_2 = Cls(n_estimators=100, max_depth=1, warm_start=True) + est_2.fit(X, y) # inits state + est_2.set_params(warm_start=False) + est_2.fit(X, y) # clears old state and equals est + + assert_array_almost_equal(est_2.predict(X), est.predict(X)) + + +@pytest.mark.parametrize("GradientBoosting", GRADIENT_BOOSTING_ESTIMATORS) +def test_warm_start_state_oob_scores(GradientBoosting): + """ + Check that the states of the OOB scores are cleared when used with `warm_start`. + """ + X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1) + n_estimators = 100 + estimator = GradientBoosting( + n_estimators=n_estimators, + max_depth=1, + subsample=0.5, + warm_start=True, + random_state=1, + ) + estimator.fit(X, y) + oob_scores, oob_score = estimator.oob_scores_, estimator.oob_score_ + assert len(oob_scores) == n_estimators + assert oob_scores[-1] == pytest.approx(oob_score) + + n_more_estimators = 200 + estimator.set_params(n_estimators=n_more_estimators).fit(X, y) + assert len(estimator.oob_scores_) == n_more_estimators + assert_allclose(estimator.oob_scores_[:n_estimators], oob_scores) + + estimator.set_params(n_estimators=n_estimators, warm_start=False).fit(X, y) + assert estimator.oob_scores_ is not oob_scores + assert estimator.oob_score_ is not oob_score + assert_allclose(estimator.oob_scores_, oob_scores) + assert estimator.oob_score_ == pytest.approx(oob_score) + assert oob_scores[-1] == pytest.approx(oob_score) + + +@pytest.mark.parametrize("Cls", GRADIENT_BOOSTING_ESTIMATORS) +def test_warm_start_smaller_n_estimators(Cls): + # Test if warm start with smaller n_estimators raises error + X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1) + est = Cls(n_estimators=100, max_depth=1, warm_start=True) + est.fit(X, y) + est.set_params(n_estimators=99) + with pytest.raises(ValueError): + est.fit(X, y) + + +@pytest.mark.parametrize("Cls", GRADIENT_BOOSTING_ESTIMATORS) +def test_warm_start_equal_n_estimators(Cls): + # Test if warm start with equal n_estimators does nothing + X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1) + est = Cls(n_estimators=100, max_depth=1) + est.fit(X, y) + + est2 = clone(est) + est2.set_params(n_estimators=est.n_estimators, warm_start=True) + est2.fit(X, y) + + assert_array_almost_equal(est2.predict(X), est.predict(X)) + + +@pytest.mark.parametrize("Cls", 
GRADIENT_BOOSTING_ESTIMATORS) +def test_warm_start_oob_switch(Cls): + # Test if oob can be turned on during warm start. + X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1) + est = Cls(n_estimators=100, max_depth=1, warm_start=True) + est.fit(X, y) + est.set_params(n_estimators=110, subsample=0.5) + est.fit(X, y) + + assert_array_equal(est.oob_improvement_[:100], np.zeros(100)) + assert_array_equal(est.oob_scores_[:100], np.zeros(100)) + + # the last 10 are not zeros + assert (est.oob_improvement_[-10:] != 0.0).all() + assert (est.oob_scores_[-10:] != 0.0).all() + + assert est.oob_scores_[-1] == pytest.approx(est.oob_score_) + + +@pytest.mark.parametrize("Cls", GRADIENT_BOOSTING_ESTIMATORS) +def test_warm_start_oob(Cls): + # Test if warm start OOB equals fit. + X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1) + est = Cls(n_estimators=200, max_depth=1, subsample=0.5, random_state=1) + est.fit(X, y) + + est_ws = Cls( + n_estimators=100, max_depth=1, subsample=0.5, random_state=1, warm_start=True + ) + est_ws.fit(X, y) + est_ws.set_params(n_estimators=200) + est_ws.fit(X, y) + + assert_array_almost_equal(est_ws.oob_improvement_[:100], est.oob_improvement_[:100]) + assert_array_almost_equal(est_ws.oob_scores_[:100], est.oob_scores_[:100]) + assert est.oob_scores_[-1] == pytest.approx(est.oob_score_) + assert est_ws.oob_scores_[-1] == pytest.approx(est_ws.oob_score_) + + +@pytest.mark.parametrize("Cls", GRADIENT_BOOSTING_ESTIMATORS) +@pytest.mark.parametrize( + "sparse_container", COO_CONTAINERS + CSC_CONTAINERS + CSR_CONTAINERS +) +def test_warm_start_sparse(Cls, sparse_container): + # Test that all sparse matrix types are supported + X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1) + est_dense = Cls( + n_estimators=100, max_depth=1, subsample=0.5, random_state=1, warm_start=True + ) + est_dense.fit(X, y) + est_dense.predict(X) + est_dense.set_params(n_estimators=200) + est_dense.fit(X, y) + y_pred_dense = est_dense.predict(X) + + X_sparse = sparse_container(X) + + est_sparse = Cls( + n_estimators=100, + max_depth=1, + subsample=0.5, + random_state=1, + warm_start=True, + ) + est_sparse.fit(X_sparse, y) + est_sparse.predict(X) + est_sparse.set_params(n_estimators=200) + est_sparse.fit(X_sparse, y) + y_pred_sparse = est_sparse.predict(X) + + assert_array_almost_equal( + est_dense.oob_improvement_[:100], est_sparse.oob_improvement_[:100] + ) + assert est_dense.oob_scores_[-1] == pytest.approx(est_dense.oob_score_) + assert_array_almost_equal(est_dense.oob_scores_[:100], est_sparse.oob_scores_[:100]) + assert est_sparse.oob_scores_[-1] == pytest.approx(est_sparse.oob_score_) + assert_array_almost_equal(y_pred_dense, y_pred_sparse) + + +@pytest.mark.parametrize("Cls", GRADIENT_BOOSTING_ESTIMATORS) +def test_warm_start_fortran(Cls, global_random_seed): + # Test that feeding a X in Fortran-ordered is giving the same results as + # in C-ordered + X, y = datasets.make_hastie_10_2(n_samples=100, random_state=global_random_seed) + est_c = Cls(n_estimators=1, random_state=global_random_seed, warm_start=True) + est_fortran = Cls(n_estimators=1, random_state=global_random_seed, warm_start=True) + + est_c.fit(X, y) + est_c.set_params(n_estimators=11) + est_c.fit(X, y) + + X_fortran = np.asfortranarray(X) + est_fortran.fit(X_fortran, y) + est_fortran.set_params(n_estimators=11) + est_fortran.fit(X_fortran, y) + + assert_allclose(est_c.predict(X), est_fortran.predict(X)) + + +def early_stopping_monitor(i, est, locals): + """Returns True on the 10th iteration.""" + if 
i == 9: + return True + else: + return False + + +@pytest.mark.parametrize("Cls", GRADIENT_BOOSTING_ESTIMATORS) +def test_monitor_early_stopping(Cls): + # Test if monitor return value works. + X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1) + + est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5) + est.fit(X, y, monitor=early_stopping_monitor) + assert est.n_estimators == 20 # this is not altered + assert est.estimators_.shape[0] == 10 + assert est.train_score_.shape[0] == 10 + assert est.oob_improvement_.shape[0] == 10 + assert est.oob_scores_.shape[0] == 10 + assert est.oob_scores_[-1] == pytest.approx(est.oob_score_) + + # try refit + est.set_params(n_estimators=30) + est.fit(X, y) + assert est.n_estimators == 30 + assert est.estimators_.shape[0] == 30 + assert est.train_score_.shape[0] == 30 + assert est.oob_improvement_.shape[0] == 30 + assert est.oob_scores_.shape[0] == 30 + assert est.oob_scores_[-1] == pytest.approx(est.oob_score_) + + est = Cls( + n_estimators=20, max_depth=1, random_state=1, subsample=0.5, warm_start=True + ) + est.fit(X, y, monitor=early_stopping_monitor) + assert est.n_estimators == 20 + assert est.estimators_.shape[0] == 10 + assert est.train_score_.shape[0] == 10 + assert est.oob_improvement_.shape[0] == 10 + assert est.oob_scores_.shape[0] == 10 + assert est.oob_scores_[-1] == pytest.approx(est.oob_score_) + + # try refit + est.set_params(n_estimators=30, warm_start=False) + est.fit(X, y) + assert est.n_estimators == 30 + assert est.train_score_.shape[0] == 30 + assert est.estimators_.shape[0] == 30 + assert est.oob_improvement_.shape[0] == 30 + assert est.oob_scores_.shape[0] == 30 + assert est.oob_scores_[-1] == pytest.approx(est.oob_score_) + + +def test_complete_classification(): + # Test greedy trees with max_depth + 1 leafs. + from sklearn.tree._tree import TREE_LEAF + + X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1) + k = 4 + + est = GradientBoostingClassifier( + n_estimators=20, max_depth=None, random_state=1, max_leaf_nodes=k + 1 + ) + est.fit(X, y) + + tree = est.estimators_[0, 0].tree_ + assert tree.max_depth == k + assert tree.children_left[tree.children_left == TREE_LEAF].shape[0] == k + 1 + + +def test_complete_regression(): + # Test greedy trees with max_depth + 1 leafs. + from sklearn.tree._tree import TREE_LEAF + + k = 4 + + est = GradientBoostingRegressor( + n_estimators=20, max_depth=None, random_state=1, max_leaf_nodes=k + 1 + ) + est.fit(X_reg, y_reg) + + tree = est.estimators_[-1, 0].tree_ + assert tree.children_left[tree.children_left == TREE_LEAF].shape[0] == k + 1 + + +def test_zero_estimator_reg(global_random_seed): + # Test if init='zero' works for regression by checking that it is better + # than a simple baseline. + + baseline = DummyRegressor(strategy="mean").fit(X_reg, y_reg) + mse_baseline = mean_squared_error(baseline.predict(X_reg), y_reg) + est = GradientBoostingRegressor( + n_estimators=5, + max_depth=1, + random_state=global_random_seed, + init="zero", + learning_rate=0.5, + ) + est.fit(X_reg, y_reg) + y_pred = est.predict(X_reg) + mse_gbdt = mean_squared_error(y_reg, y_pred) + assert mse_gbdt < mse_baseline + + +def test_zero_estimator_clf(global_random_seed): + # Test if init='zero' works for classification. 
+ X = iris.data + y = np.array(iris.target) + + est = GradientBoostingClassifier( + n_estimators=20, max_depth=1, random_state=global_random_seed, init="zero" + ) + est.fit(X, y) + + assert est.score(X, y) > 0.96 + + # binary clf + mask = y != 0 + y[mask] = 1 + y[~mask] = 0 + est = GradientBoostingClassifier( + n_estimators=20, max_depth=1, random_state=global_random_seed, init="zero" + ) + est.fit(X, y) + assert est.score(X, y) > 0.96 + + +@pytest.mark.parametrize("GBEstimator", GRADIENT_BOOSTING_ESTIMATORS) +def test_max_leaf_nodes_max_depth(GBEstimator): + # Test precedence of max_leaf_nodes over max_depth. + X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1) + + k = 4 + + est = GBEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y) + tree = est.estimators_[0, 0].tree_ + assert tree.max_depth == 1 + + est = GBEstimator(max_depth=1).fit(X, y) + tree = est.estimators_[0, 0].tree_ + assert tree.max_depth == 1 + + +@pytest.mark.parametrize("GBEstimator", GRADIENT_BOOSTING_ESTIMATORS) +def test_min_impurity_decrease(GBEstimator): + X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1) + + est = GBEstimator(min_impurity_decrease=0.1) + est.fit(X, y) + for tree in est.estimators_.flat: + # Simply check if the parameter is passed on correctly. Tree tests + # will suffice for the actual working of this param + assert tree.min_impurity_decrease == 0.1 + + +def test_warm_start_wo_nestimators_change(): + # Test if warm_start does nothing if n_estimators is not changed. + # Regression test for #3513. + clf = GradientBoostingClassifier(n_estimators=10, warm_start=True) + clf.fit([[0, 1], [2, 3]], [0, 1]) + assert clf.estimators_.shape[0] == 10 + clf.fit([[0, 1], [2, 3]], [0, 1]) + assert clf.estimators_.shape[0] == 10 + + +@pytest.mark.parametrize( + ("loss", "value"), + [ + ("squared_error", 0.5), + ("absolute_error", 0.0), + ("huber", 0.5), + ("quantile", 0.5), + ], +) +def test_non_uniform_weights_toy_edge_case_reg(loss, value): + X = [[1, 0], [1, 0], [1, 0], [0, 1]] + y = [0, 0, 1, 0] + # ignore the first 2 training samples by setting their weight to 0 + sample_weight = [0, 0, 1, 1] + gb = GradientBoostingRegressor(learning_rate=1.0, n_estimators=2, loss=loss) + gb.fit(X, y, sample_weight=sample_weight) + assert gb.predict([[1, 0]])[0] >= value + + +def test_non_uniform_weights_toy_edge_case_clf(): + X = [[1, 0], [1, 0], [1, 0], [0, 1]] + y = [0, 0, 1, 0] + # ignore the first 2 training samples by setting their weight to 0 + sample_weight = [0, 0, 1, 1] + for loss in ("log_loss", "exponential"): + gb = GradientBoostingClassifier(n_estimators=5, loss=loss) + gb.fit(X, y, sample_weight=sample_weight) + assert_array_equal(gb.predict([[1, 0]]), [1]) + + +@skip_if_32bit +@pytest.mark.parametrize( + "EstimatorClass", (GradientBoostingClassifier, GradientBoostingRegressor) +) +@pytest.mark.parametrize( + "sparse_container", COO_CONTAINERS + CSC_CONTAINERS + CSR_CONTAINERS +) +def test_sparse_input(EstimatorClass, sparse_container): + y, X = datasets.make_multilabel_classification( + random_state=0, n_samples=50, n_features=1, n_classes=20 + ) + y = y[:, 0] + X_sparse = sparse_container(X) + + dense = EstimatorClass( + n_estimators=10, random_state=0, max_depth=2, min_impurity_decrease=1e-7 + ).fit(X, y) + sparse = EstimatorClass( + n_estimators=10, random_state=0, max_depth=2, min_impurity_decrease=1e-7 + ).fit(X_sparse, y) + + assert_array_almost_equal(sparse.apply(X), dense.apply(X)) + assert_array_almost_equal(sparse.predict(X), dense.predict(X)) + 
assert_array_almost_equal(sparse.feature_importances_, dense.feature_importances_) + + assert_array_almost_equal(sparse.predict(X_sparse), dense.predict(X)) + assert_array_almost_equal(dense.predict(X_sparse), sparse.predict(X)) + + if issubclass(EstimatorClass, GradientBoostingClassifier): + assert_array_almost_equal(sparse.predict_proba(X), dense.predict_proba(X)) + assert_array_almost_equal( + sparse.predict_log_proba(X), dense.predict_log_proba(X) + ) + + assert_array_almost_equal( + sparse.decision_function(X_sparse), sparse.decision_function(X) + ) + assert_array_almost_equal( + dense.decision_function(X_sparse), sparse.decision_function(X) + ) + for res_sparse, res in zip( + sparse.staged_decision_function(X_sparse), + sparse.staged_decision_function(X), + ): + assert_array_almost_equal(res_sparse, res) + + +@pytest.mark.parametrize( + "GradientBoostingEstimator", [GradientBoostingClassifier, GradientBoostingRegressor] +) +def test_gradient_boosting_early_stopping(GradientBoostingEstimator): + # Check if early stopping works as expected, that is empirically check that the + # number of trained estimators is increasing when the tolerance decreases. + + X, y = make_classification(n_samples=1000, random_state=0) + n_estimators = 1000 + + gb_large_tol = GradientBoostingEstimator( + n_estimators=n_estimators, + n_iter_no_change=10, + learning_rate=0.1, + max_depth=3, + random_state=42, + tol=1e-1, + ) + + gb_small_tol = GradientBoostingEstimator( + n_estimators=n_estimators, + n_iter_no_change=10, + learning_rate=0.1, + max_depth=3, + random_state=42, + tol=1e-3, + ) + + X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42) + gb_large_tol.fit(X_train, y_train) + gb_small_tol.fit(X_train, y_train) + + assert gb_large_tol.n_estimators_ < gb_small_tol.n_estimators_ < n_estimators + + assert gb_large_tol.score(X_test, y_test) > 0.7 + assert gb_small_tol.score(X_test, y_test) > 0.7 + + +def test_gradient_boosting_without_early_stopping(): + # When early stopping is not used, the number of trained estimators + # must be the one specified. + X, y = make_classification(n_samples=1000, random_state=0) + + gbc = GradientBoostingClassifier( + n_estimators=50, learning_rate=0.1, max_depth=3, random_state=42 + ) + gbc.fit(X, y) + gbr = GradientBoostingRegressor( + n_estimators=30, learning_rate=0.1, max_depth=3, random_state=42 + ) + gbr.fit(X, y) + + # The number of trained estimators must be the one specified. 
+ assert gbc.n_estimators_ == 50 + assert gbr.n_estimators_ == 30 + + +def test_gradient_boosting_validation_fraction(): + X, y = make_classification(n_samples=1000, random_state=0) + + gbc = GradientBoostingClassifier( + n_estimators=100, + n_iter_no_change=10, + validation_fraction=0.1, + learning_rate=0.1, + max_depth=3, + random_state=42, + ) + gbc2 = clone(gbc).set_params(validation_fraction=0.3) + gbc3 = clone(gbc).set_params(n_iter_no_change=20) + + gbr = GradientBoostingRegressor( + n_estimators=100, + n_iter_no_change=10, + learning_rate=0.1, + max_depth=3, + validation_fraction=0.1, + random_state=42, + ) + gbr2 = clone(gbr).set_params(validation_fraction=0.3) + gbr3 = clone(gbr).set_params(n_iter_no_change=20) + + X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42) + # Check if validation_fraction has an effect + gbc.fit(X_train, y_train) + gbc2.fit(X_train, y_train) + assert gbc.n_estimators_ != gbc2.n_estimators_ + + gbr.fit(X_train, y_train) + gbr2.fit(X_train, y_train) + assert gbr.n_estimators_ != gbr2.n_estimators_ + + # Check if n_estimators_ increase monotonically with n_iter_no_change + # Set validation + gbc3.fit(X_train, y_train) + gbr3.fit(X_train, y_train) + assert gbr.n_estimators_ < gbr3.n_estimators_ + assert gbc.n_estimators_ < gbc3.n_estimators_ + + +def test_early_stopping_stratified(): + # Make sure data splitting for early stopping is stratified + X = [[1, 2], [2, 3], [3, 4], [4, 5]] + y = [0, 0, 0, 1] + + gbc = GradientBoostingClassifier(n_iter_no_change=5) + with pytest.raises( + ValueError, match="The least populated class in y has only 1 member" + ): + gbc.fit(X, y) + + +def _make_multiclass(): + return make_classification(n_classes=3, n_clusters_per_class=1) + + +@pytest.mark.parametrize( + "gb, dataset_maker, init_estimator", + [ + (GradientBoostingClassifier, make_classification, DummyClassifier), + (GradientBoostingClassifier, _make_multiclass, DummyClassifier), + (GradientBoostingRegressor, make_regression, DummyRegressor), + ], + ids=["binary classification", "multiclass classification", "regression"], +) +def test_gradient_boosting_with_init( + gb, dataset_maker, init_estimator, global_random_seed +): + # Check that GradientBoostingRegressor works when init is a sklearn + # estimator. + # Check that an error is raised if trying to fit with sample weight but + # initial estimator does not support sample weight + + X, y = dataset_maker() + sample_weight = np.random.RandomState(global_random_seed).rand(100) + + # init supports sample weights + init_est = init_estimator() + gb(init=init_est).fit(X, y, sample_weight=sample_weight) + + # init does not support sample weights + init_est = NoSampleWeightWrapper(init_estimator()) + gb(init=init_est).fit(X, y) # ok no sample weights + with pytest.raises(ValueError, match="estimator.*does not support sample weights"): + gb(init=init_est).fit(X, y, sample_weight=sample_weight) + + +def test_gradient_boosting_with_init_pipeline(): + # Check that the init estimator can be a pipeline (see issue #13466) + + X, y = make_regression(random_state=0) + init = make_pipeline(LinearRegression()) + gb = GradientBoostingRegressor(init=init) + gb.fit(X, y) # pipeline without sample_weight works fine + + with pytest.raises( + ValueError, + match="The initial estimator Pipeline does not support sample weights", + ): + gb.fit(X, y, sample_weight=np.ones(X.shape[0])) + + # Passing sample_weight to a pipeline raises a ValueError. 
This test makes + # sure we make the distinction between ValueError raised by a pipeline that + # was passed sample_weight, and a InvalidParameterError raised by a regular + # estimator whose input checking failed. + invalid_nu = 1.5 + err_msg = ( + "The 'nu' parameter of NuSVR must be a float in the" + f" range (0.0, 1.0]. Got {invalid_nu} instead." + ) + with pytest.raises(InvalidParameterError, match=re.escape(err_msg)): + # Note that NuSVR properly supports sample_weight + init = NuSVR(gamma="auto", nu=invalid_nu) + gb = GradientBoostingRegressor(init=init) + gb.fit(X, y, sample_weight=np.ones(X.shape[0])) + + +def test_early_stopping_n_classes(): + # when doing early stopping (_, , y_train, _ = train_test_split(X, y)) + # there might be classes in y that are missing in y_train. As the init + # estimator will be trained on y_train, we need to raise an error if this + # happens. + + X = [[1]] * 10 + y = [0, 0] + [1] * 8 # only 2 negative class over 10 samples + gb = GradientBoostingClassifier( + n_iter_no_change=5, random_state=0, validation_fraction=0.8 + ) + with pytest.raises( + ValueError, match="The training data after the early stopping split" + ): + gb.fit(X, y) + + # No error if we let training data be big enough + gb = GradientBoostingClassifier( + n_iter_no_change=5, random_state=0, validation_fraction=0.4 + ) + + +def test_gbr_degenerate_feature_importances(): + # growing an ensemble of single node trees. See #13620 + X = np.zeros((10, 10)) + y = np.ones((10,)) + gbr = GradientBoostingRegressor().fit(X, y) + assert_array_equal(gbr.feature_importances_, np.zeros(10, dtype=np.float64)) + + +def test_huber_vs_mean_and_median(): + """Check that huber lies between absolute and squared error.""" + n_rep = 100 + n_samples = 10 + y = np.tile(np.arange(n_samples), n_rep) + x1 = np.minimum(y, n_samples / 2) + x2 = np.minimum(-y, -n_samples / 2) + X = np.c_[x1, x2] + + rng = np.random.RandomState(42) + # We want an asymmetric distribution. + y = y + rng.exponential(scale=1, size=y.shape) + + gbt_absolute_error = GradientBoostingRegressor(loss="absolute_error").fit(X, y) + gbt_huber = GradientBoostingRegressor(loss="huber").fit(X, y) + gbt_squared_error = GradientBoostingRegressor().fit(X, y) + + gbt_huber_predictions = gbt_huber.predict(X) + assert np.all(gbt_absolute_error.predict(X) <= gbt_huber_predictions) + assert np.all(gbt_huber_predictions <= gbt_squared_error.predict(X)) + + +def test_safe_divide(): + """Test that _safe_divide handles division by zero.""" + with warnings.catch_warnings(): + warnings.simplefilter("error") + assert _safe_divide(np.float64(1e300), 0) == 0 + assert _safe_divide(np.float64(0.0), np.float64(0.0)) == 0 + with pytest.warns(RuntimeWarning, match="overflow"): + # np.finfo(float).max = 1.7976931348623157e+308 + _safe_divide(np.float64(1e300), 1e-10) + + +def test_squared_error_exact_backward_compat(): + """Test squared error GBT backward compat on a simple dataset. + + The results to compare against are taken from scikit-learn v1.2.0. 
+ """ + n_samples = 10 + y = np.arange(n_samples) + x1 = np.minimum(y, n_samples / 2) + x2 = np.minimum(-y, -n_samples / 2) + X = np.c_[x1, x2] + gbt = GradientBoostingRegressor(loss="squared_error", n_estimators=100).fit(X, y) + + pred_result = np.array( + [ + 1.39245726e-04, + 1.00010468e00, + 2.00007043e00, + 3.00004051e00, + 4.00000802e00, + 4.99998972e00, + 5.99996312e00, + 6.99993395e00, + 7.99989372e00, + 8.99985660e00, + ] + ) + assert_allclose(gbt.predict(X), pred_result, rtol=1e-8) + + train_score = np.array( + [ + 4.87246390e-08, + 3.95590036e-08, + 3.21267865e-08, + 2.60970300e-08, + 2.11820178e-08, + 1.71995782e-08, + 1.39695549e-08, + 1.13391770e-08, + 9.19931587e-09, + 7.47000575e-09, + ] + ) + assert_allclose(gbt.train_score_[-10:], train_score, rtol=1e-8) + + # Same but with sample_weights + sample_weights = np.tile([1, 10], n_samples // 2) + gbt = GradientBoostingRegressor(loss="squared_error", n_estimators=100).fit( + X, y, sample_weight=sample_weights + ) + + pred_result = np.array( + [ + 1.52391462e-04, + 1.00011168e00, + 2.00007724e00, + 3.00004638e00, + 4.00001302e00, + 4.99999873e00, + 5.99997093e00, + 6.99994329e00, + 7.99991290e00, + 8.99988727e00, + ] + ) + assert_allclose(gbt.predict(X), pred_result, rtol=1e-6, atol=1e-5) + + train_score = np.array( + [ + 4.12445296e-08, + 3.34418322e-08, + 2.71151383e-08, + 2.19782469e-08, + 1.78173649e-08, + 1.44461976e-08, + 1.17120123e-08, + 9.49485678e-09, + 7.69772505e-09, + 6.24155316e-09, + ] + ) + assert_allclose(gbt.train_score_[-10:], train_score, rtol=1e-3, atol=1e-11) + + +@skip_if_32bit +def test_huber_exact_backward_compat(): + """Test huber GBT backward compat on a simple dataset. + + The results to compare against are taken from scikit-learn v1.2.0. + """ + n_samples = 10 + y = np.arange(n_samples) + x1 = np.minimum(y, n_samples / 2) + x2 = np.minimum(-y, -n_samples / 2) + X = np.c_[x1, x2] + gbt = GradientBoostingRegressor(loss="huber", n_estimators=100, alpha=0.8).fit(X, y) + + assert_allclose(gbt._loss.closs.delta, 0.0001655688041282133) + + pred_result = np.array( + [ + 1.48120765e-04, + 9.99949174e-01, + 2.00116957e00, + 2.99986716e00, + 4.00012064e00, + 5.00002462e00, + 5.99998898e00, + 6.99692549e00, + 8.00006356e00, + 8.99985099e00, + ] + ) + assert_allclose(gbt.predict(X), pred_result, rtol=1e-8) + + train_score = np.array( + [ + 2.59484709e-07, + 2.19165900e-07, + 1.89644782e-07, + 1.64556454e-07, + 1.38705110e-07, + 1.20373736e-07, + 1.04746082e-07, + 9.13835687e-08, + 8.20245756e-08, + 7.17122188e-08, + ] + ) + assert_allclose(gbt.train_score_[-10:], train_score, rtol=1e-8) + + +def test_binomial_error_exact_backward_compat(): + """Test binary log_loss GBT backward compat on a simple dataset. + + The results to compare against are taken from scikit-learn v1.2.0. 
+ """ + n_samples = 10 + y = np.arange(n_samples) % 2 + x1 = np.minimum(y, n_samples / 2) + x2 = np.minimum(-y, -n_samples / 2) + X = np.c_[x1, x2] + gbt = GradientBoostingClassifier(loss="log_loss", n_estimators=100).fit(X, y) + + pred_result = np.array( + [ + [9.99978098e-01, 2.19017313e-05], + [2.19017313e-05, 9.99978098e-01], + [9.99978098e-01, 2.19017313e-05], + [2.19017313e-05, 9.99978098e-01], + [9.99978098e-01, 2.19017313e-05], + [2.19017313e-05, 9.99978098e-01], + [9.99978098e-01, 2.19017313e-05], + [2.19017313e-05, 9.99978098e-01], + [9.99978098e-01, 2.19017313e-05], + [2.19017313e-05, 9.99978098e-01], + ] + ) + assert_allclose(gbt.predict_proba(X), pred_result, rtol=1e-8) + + train_score = np.array( + [ + 1.07742210e-04, + 9.74889078e-05, + 8.82113863e-05, + 7.98167784e-05, + 7.22210566e-05, + 6.53481907e-05, + 5.91293869e-05, + 5.35023988e-05, + 4.84109045e-05, + 4.38039423e-05, + ] + ) + assert_allclose(gbt.train_score_[-10:], train_score, rtol=1e-8) + + +def test_multinomial_error_exact_backward_compat(): + """Test multiclass log_loss GBT backward compat on a simple dataset. + + The results to compare against are taken from scikit-learn v1.2.0. + """ + n_samples = 10 + y = np.arange(n_samples) % 4 + x1 = np.minimum(y, n_samples / 2) + x2 = np.minimum(-y, -n_samples / 2) + X = np.c_[x1, x2] + gbt = GradientBoostingClassifier(loss="log_loss", n_estimators=100).fit(X, y) + + pred_result = np.array( + [ + [9.99999727e-01, 1.11956255e-07, 8.04921671e-08, 8.04921668e-08], + [1.11956254e-07, 9.99999727e-01, 8.04921671e-08, 8.04921668e-08], + [1.19417637e-07, 1.19417637e-07, 9.99999675e-01, 8.60526098e-08], + [1.19417637e-07, 1.19417637e-07, 8.60526088e-08, 9.99999675e-01], + [9.99999727e-01, 1.11956255e-07, 8.04921671e-08, 8.04921668e-08], + [1.11956254e-07, 9.99999727e-01, 8.04921671e-08, 8.04921668e-08], + [1.19417637e-07, 1.19417637e-07, 9.99999675e-01, 8.60526098e-08], + [1.19417637e-07, 1.19417637e-07, 8.60526088e-08, 9.99999675e-01], + [9.99999727e-01, 1.11956255e-07, 8.04921671e-08, 8.04921668e-08], + [1.11956254e-07, 9.99999727e-01, 8.04921671e-08, 8.04921668e-08], + ] + ) + assert_allclose(gbt.predict_proba(X), pred_result, rtol=1e-8) + + train_score = np.array( + [ + 1.13300150e-06, + 9.75183397e-07, + 8.39348103e-07, + 7.22433588e-07, + 6.21804338e-07, + 5.35191943e-07, + 4.60643966e-07, + 3.96479930e-07, + 3.41253434e-07, + 2.93719550e-07, + ] + ) + assert_allclose(gbt.train_score_[-10:], train_score, rtol=1e-8) + + +def test_gb_denominator_zero(global_random_seed): + """Test _update_terminal_regions denominator is not zero. + + For instance for log loss based binary classification, the line search step might + become nan/inf as denominator = hessian = prob * (1 - prob) and prob = 0 or 1 can + happen. + Here, we create a situation were this happens (at least with roughly 80%) based + on the random seed. 
+ """ + X, y = datasets.make_hastie_10_2(n_samples=100, random_state=20) + + params = { + "learning_rate": 1.0, + "subsample": 0.5, + "n_estimators": 100, + "max_leaf_nodes": 4, + "max_depth": None, + "random_state": global_random_seed, + "min_samples_leaf": 2, + } + + clf = GradientBoostingClassifier(**params) + # _safe_devide would raise a RuntimeWarning + with warnings.catch_warnings(): + warnings.simplefilter("error") + clf.fit(X, y) diff --git a/venv/lib/python3.10/site-packages/sklearn/ensemble/tests/test_iforest.py b/venv/lib/python3.10/site-packages/sklearn/ensemble/tests/test_iforest.py new file mode 100644 index 0000000000000000000000000000000000000000..22dcc92906a6b12dbb2aaea06193347fad5b02a5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/ensemble/tests/test_iforest.py @@ -0,0 +1,363 @@ +""" +Testing for Isolation Forest algorithm (sklearn.ensemble.iforest). +""" + +# Authors: Nicolas Goix +# Alexandre Gramfort +# License: BSD 3 clause + +import warnings +from unittest.mock import Mock, patch + +import numpy as np +import pytest + +from sklearn.datasets import load_diabetes, load_iris, make_classification +from sklearn.ensemble import IsolationForest +from sklearn.ensemble._iforest import _average_path_length +from sklearn.metrics import roc_auc_score +from sklearn.model_selection import ParameterGrid, train_test_split +from sklearn.utils import check_random_state +from sklearn.utils._testing import ( + assert_allclose, + assert_array_almost_equal, + assert_array_equal, + ignore_warnings, +) +from sklearn.utils.fixes import CSC_CONTAINERS, CSR_CONTAINERS + +# load iris & diabetes dataset +iris = load_iris() +diabetes = load_diabetes() + + +def test_iforest(global_random_seed): + """Check Isolation Forest for various parameter settings.""" + X_train = np.array([[0, 1], [1, 2]]) + X_test = np.array([[2, 1], [1, 1]]) + + grid = ParameterGrid( + {"n_estimators": [3], "max_samples": [0.5, 1.0, 3], "bootstrap": [True, False]} + ) + + with ignore_warnings(): + for params in grid: + IsolationForest(random_state=global_random_seed, **params).fit( + X_train + ).predict(X_test) + + +@pytest.mark.parametrize("sparse_container", CSC_CONTAINERS + CSR_CONTAINERS) +def test_iforest_sparse(global_random_seed, sparse_container): + """Check IForest for various parameter settings on sparse input.""" + rng = check_random_state(global_random_seed) + X_train, X_test = train_test_split(diabetes.data[:50], random_state=rng) + grid = ParameterGrid({"max_samples": [0.5, 1.0], "bootstrap": [True, False]}) + + X_train_sparse = sparse_container(X_train) + X_test_sparse = sparse_container(X_test) + + for params in grid: + # Trained on sparse format + sparse_classifier = IsolationForest( + n_estimators=10, random_state=global_random_seed, **params + ).fit(X_train_sparse) + sparse_results = sparse_classifier.predict(X_test_sparse) + + # Trained on dense format + dense_classifier = IsolationForest( + n_estimators=10, random_state=global_random_seed, **params + ).fit(X_train) + dense_results = dense_classifier.predict(X_test) + + assert_array_equal(sparse_results, dense_results) + + +def test_iforest_error(): + """Test that it gives proper exception on deficient input.""" + X = iris.data + + # The dataset has less than 256 samples, explicitly setting + # max_samples > n_samples should result in a warning. 
If not set + # explicitly there should be no warning + warn_msg = "max_samples will be set to n_samples for estimation" + with pytest.warns(UserWarning, match=warn_msg): + IsolationForest(max_samples=1000).fit(X) + with warnings.catch_warnings(): + warnings.simplefilter("error", UserWarning) + IsolationForest(max_samples="auto").fit(X) + with warnings.catch_warnings(): + warnings.simplefilter("error", UserWarning) + IsolationForest(max_samples=np.int64(2)).fit(X) + + # test X_test n_features match X_train one: + with pytest.raises(ValueError): + IsolationForest().fit(X).predict(X[:, 1:]) + + +def test_recalculate_max_depth(): + """Check max_depth recalculation when max_samples is reset to n_samples""" + X = iris.data + clf = IsolationForest().fit(X) + for est in clf.estimators_: + assert est.max_depth == int(np.ceil(np.log2(X.shape[0]))) + + +def test_max_samples_attribute(): + X = iris.data + clf = IsolationForest().fit(X) + assert clf.max_samples_ == X.shape[0] + + clf = IsolationForest(max_samples=500) + warn_msg = "max_samples will be set to n_samples for estimation" + with pytest.warns(UserWarning, match=warn_msg): + clf.fit(X) + assert clf.max_samples_ == X.shape[0] + + clf = IsolationForest(max_samples=0.4).fit(X) + assert clf.max_samples_ == 0.4 * X.shape[0] + + +def test_iforest_parallel_regression(global_random_seed): + """Check parallel regression.""" + rng = check_random_state(global_random_seed) + + X_train, X_test = train_test_split(diabetes.data, random_state=rng) + + ensemble = IsolationForest(n_jobs=3, random_state=global_random_seed).fit(X_train) + + ensemble.set_params(n_jobs=1) + y1 = ensemble.predict(X_test) + ensemble.set_params(n_jobs=2) + y2 = ensemble.predict(X_test) + assert_array_almost_equal(y1, y2) + + ensemble = IsolationForest(n_jobs=1, random_state=global_random_seed).fit(X_train) + + y3 = ensemble.predict(X_test) + assert_array_almost_equal(y1, y3) + + +def test_iforest_performance(global_random_seed): + """Test Isolation Forest performs well""" + + # Generate train/test data + rng = check_random_state(global_random_seed) + X = 0.3 * rng.randn(600, 2) + X = rng.permutation(np.vstack((X + 2, X - 2))) + X_train = X[:1000] + + # Generate some abnormal novel observations + X_outliers = rng.uniform(low=-1, high=1, size=(200, 2)) + X_test = np.vstack((X[1000:], X_outliers)) + y_test = np.array([0] * 200 + [1] * 200) + + # fit the model + clf = IsolationForest(max_samples=100, random_state=rng).fit(X_train) + + # predict scores (the lower, the more normal) + y_pred = -clf.decision_function(X_test) + + # check that there is at most 6 errors (false positive or false negative) + assert roc_auc_score(y_test, y_pred) > 0.98 + + +@pytest.mark.parametrize("contamination", [0.25, "auto"]) +def test_iforest_works(contamination, global_random_seed): + # toy sample (the last two samples are outliers) + X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [7, 4], [-5, 9]] + + # Test IsolationForest + clf = IsolationForest(random_state=global_random_seed, contamination=contamination) + clf.fit(X) + decision_func = -clf.decision_function(X) + pred = clf.predict(X) + # assert detect outliers: + assert np.min(decision_func[-2:]) > np.max(decision_func[:-2]) + assert_array_equal(pred, 6 * [1] + 2 * [-1]) + + +def test_max_samples_consistency(): + # Make sure validated max_samples in iforest and BaseBagging are identical + X = iris.data + clf = IsolationForest().fit(X) + assert clf.max_samples_ == clf._max_samples + + +def test_iforest_subsampled_features(): + # It tests 
non-regression for #5732 which failed at predict. + rng = check_random_state(0) + X_train, X_test, y_train, y_test = train_test_split( + diabetes.data[:50], diabetes.target[:50], random_state=rng + ) + clf = IsolationForest(max_features=0.8) + clf.fit(X_train, y_train) + clf.predict(X_test) + + +def test_iforest_average_path_length(): + # It tests non-regression for #8549 which used the wrong formula + # for average path length, strictly for the integer case + # Updated to check average path length when input is <= 2 (issue #11839) + result_one = 2.0 * (np.log(4.0) + np.euler_gamma) - 2.0 * 4.0 / 5.0 + result_two = 2.0 * (np.log(998.0) + np.euler_gamma) - 2.0 * 998.0 / 999.0 + assert_allclose(_average_path_length([0]), [0.0]) + assert_allclose(_average_path_length([1]), [0.0]) + assert_allclose(_average_path_length([2]), [1.0]) + assert_allclose(_average_path_length([5]), [result_one]) + assert_allclose(_average_path_length([999]), [result_two]) + assert_allclose( + _average_path_length(np.array([1, 2, 5, 999])), + [0.0, 1.0, result_one, result_two], + ) + # _average_path_length is increasing + avg_path_length = _average_path_length(np.arange(5)) + assert_array_equal(avg_path_length, np.sort(avg_path_length)) + + +def test_score_samples(): + X_train = [[1, 1], [1, 2], [2, 1]] + clf1 = IsolationForest(contamination=0.1).fit(X_train) + clf2 = IsolationForest().fit(X_train) + assert_array_equal( + clf1.score_samples([[2.0, 2.0]]), + clf1.decision_function([[2.0, 2.0]]) + clf1.offset_, + ) + assert_array_equal( + clf2.score_samples([[2.0, 2.0]]), + clf2.decision_function([[2.0, 2.0]]) + clf2.offset_, + ) + assert_array_equal( + clf1.score_samples([[2.0, 2.0]]), clf2.score_samples([[2.0, 2.0]]) + ) + + +def test_iforest_warm_start(): + """Test iterative addition of iTrees to an iForest""" + + rng = check_random_state(0) + X = rng.randn(20, 2) + + # fit first 10 trees + clf = IsolationForest( + n_estimators=10, max_samples=20, random_state=rng, warm_start=True + ) + clf.fit(X) + # remember the 1st tree + tree_1 = clf.estimators_[0] + # fit another 10 trees + clf.set_params(n_estimators=20) + clf.fit(X) + # expecting 20 fitted trees and no overwritten trees + assert len(clf.estimators_) == 20 + assert clf.estimators_[0] is tree_1 + + +# mock get_chunk_n_rows to actually test more than one chunk (here one +# chunk has 3 rows): +@patch( + "sklearn.ensemble._iforest.get_chunk_n_rows", + side_effect=Mock(**{"return_value": 3}), +) +@pytest.mark.parametrize("contamination, n_predict_calls", [(0.25, 3), ("auto", 2)]) +def test_iforest_chunks_works1( + mocked_get_chunk, contamination, n_predict_calls, global_random_seed +): + test_iforest_works(contamination, global_random_seed) + assert mocked_get_chunk.call_count == n_predict_calls + + +# idem with chunk_size = 10 rows +@patch( + "sklearn.ensemble._iforest.get_chunk_n_rows", + side_effect=Mock(**{"return_value": 10}), +) +@pytest.mark.parametrize("contamination, n_predict_calls", [(0.25, 3), ("auto", 2)]) +def test_iforest_chunks_works2( + mocked_get_chunk, contamination, n_predict_calls, global_random_seed +): + test_iforest_works(contamination, global_random_seed) + assert mocked_get_chunk.call_count == n_predict_calls + + +def test_iforest_with_uniform_data(): + """Test whether iforest predicts inliers when using uniform data""" + + # 2-d array of all 1s + X = np.ones((100, 10)) + iforest = IsolationForest() + iforest.fit(X) + + rng = np.random.RandomState(0) + + assert all(iforest.predict(X) == 1) + assert all(iforest.predict(rng.randn(100, 10)) 
== 1) + assert all(iforest.predict(X + 1) == 1) + assert all(iforest.predict(X - 1) == 1) + + # 2-d array where columns contain the same value across rows + X = np.repeat(rng.randn(1, 10), 100, 0) + iforest = IsolationForest() + iforest.fit(X) + + assert all(iforest.predict(X) == 1) + assert all(iforest.predict(rng.randn(100, 10)) == 1) + assert all(iforest.predict(np.ones((100, 10))) == 1) + + # Single row + X = rng.randn(1, 10) + iforest = IsolationForest() + iforest.fit(X) + + assert all(iforest.predict(X) == 1) + assert all(iforest.predict(rng.randn(100, 10)) == 1) + assert all(iforest.predict(np.ones((100, 10))) == 1) + + +@pytest.mark.parametrize("csc_container", CSC_CONTAINERS) +def test_iforest_with_n_jobs_does_not_segfault(csc_container): + """Check that Isolation Forest does not segfault with n_jobs=2 + + Non-regression test for #23252 + """ + X, _ = make_classification(n_samples=85_000, n_features=100, random_state=0) + X = csc_container(X) + IsolationForest(n_estimators=10, max_samples=256, n_jobs=2).fit(X) + + +def test_iforest_preserve_feature_names(): + """Check that feature names are preserved when contamination is not "auto". + + Feature names are required for consistency checks during scoring. + + Non-regression test for Issue #25844 + """ + pd = pytest.importorskip("pandas") + rng = np.random.RandomState(0) + + X = pd.DataFrame(data=rng.randn(4), columns=["a"]) + model = IsolationForest(random_state=0, contamination=0.05) + + with warnings.catch_warnings(): + warnings.simplefilter("error", UserWarning) + model.fit(X) + + +@pytest.mark.parametrize("sparse_container", CSC_CONTAINERS + CSR_CONTAINERS) +def test_iforest_sparse_input_float_contamination(sparse_container): + """Check that `IsolationForest` accepts sparse matrix input and float value for + contamination. 
+ + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/27626 + """ + X, _ = make_classification(n_samples=50, n_features=4, random_state=0) + X = sparse_container(X) + X.sort_indices() + contamination = 0.1 + iforest = IsolationForest( + n_estimators=5, contamination=contamination, random_state=0 + ).fit(X) + + X_decision = iforest.decision_function(X) + assert (X_decision < 0).sum() / X.shape[0] == pytest.approx(contamination) diff --git a/venv/lib/python3.10/site-packages/sklearn/ensemble/tests/test_stacking.py b/venv/lib/python3.10/site-packages/sklearn/ensemble/tests/test_stacking.py new file mode 100644 index 0000000000000000000000000000000000000000..0d1493529e318ed00a2b7fe643f2ccd2fea92b8f --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/ensemble/tests/test_stacking.py @@ -0,0 +1,890 @@ +"""Test the stacking classifier and regressor.""" + +# Authors: Guillaume Lemaitre +# License: BSD 3 clause + +from unittest.mock import Mock + +import numpy as np +import pytest +from numpy.testing import assert_array_equal +from scipy import sparse + +from sklearn.base import BaseEstimator, ClassifierMixin, RegressorMixin, clone +from sklearn.datasets import ( + load_breast_cancer, + load_diabetes, + load_iris, + make_classification, + make_multilabel_classification, + make_regression, +) +from sklearn.dummy import DummyClassifier, DummyRegressor +from sklearn.ensemble import ( + RandomForestClassifier, + RandomForestRegressor, + StackingClassifier, + StackingRegressor, +) +from sklearn.exceptions import ConvergenceWarning, NotFittedError +from sklearn.linear_model import ( + LinearRegression, + LogisticRegression, + Ridge, + RidgeClassifier, +) +from sklearn.model_selection import KFold, StratifiedKFold, train_test_split +from sklearn.neighbors import KNeighborsClassifier +from sklearn.neural_network import MLPClassifier +from sklearn.preprocessing import scale +from sklearn.svm import SVC, LinearSVC, LinearSVR +from sklearn.utils._mocking import CheckingClassifier +from sklearn.utils._testing import ( + assert_allclose, + assert_allclose_dense_sparse, + ignore_warnings, +) +from sklearn.utils.fixes import COO_CONTAINERS, CSC_CONTAINERS, CSR_CONTAINERS + +diabetes = load_diabetes() +X_diabetes, y_diabetes = diabetes.data, diabetes.target +iris = load_iris() +X_iris, y_iris = iris.data, iris.target +X_multilabel, y_multilabel = make_multilabel_classification( + n_classes=3, random_state=42 +) +X_binary, y_binary = make_classification(n_classes=2, random_state=42) + + +@pytest.mark.parametrize( + "cv", [3, StratifiedKFold(n_splits=3, shuffle=True, random_state=42)] +) +@pytest.mark.parametrize( + "final_estimator", [None, RandomForestClassifier(random_state=42)] +) +@pytest.mark.parametrize("passthrough", [False, True]) +def test_stacking_classifier_iris(cv, final_estimator, passthrough): + # prescale the data to avoid convergence warning without using a pipeline + # for later assert + X_train, X_test, y_train, y_test = train_test_split( + scale(X_iris), y_iris, stratify=y_iris, random_state=42 + ) + estimators = [("lr", LogisticRegression()), ("svc", LinearSVC(dual="auto"))] + clf = StackingClassifier( + estimators=estimators, + final_estimator=final_estimator, + cv=cv, + passthrough=passthrough, + ) + clf.fit(X_train, y_train) + clf.predict(X_test) + clf.predict_proba(X_test) + assert clf.score(X_test, y_test) > 0.8 + + X_trans = clf.transform(X_test) + expected_column_count = 10 if passthrough else 6 + assert X_trans.shape[1] == expected_column_count + if 
passthrough: + assert_allclose(X_test, X_trans[:, -4:]) + + clf.set_params(lr="drop") + clf.fit(X_train, y_train) + clf.predict(X_test) + clf.predict_proba(X_test) + if final_estimator is None: + # LogisticRegression has decision_function method + clf.decision_function(X_test) + + X_trans = clf.transform(X_test) + expected_column_count_drop = 7 if passthrough else 3 + assert X_trans.shape[1] == expected_column_count_drop + if passthrough: + assert_allclose(X_test, X_trans[:, -4:]) + + +def test_stacking_classifier_drop_column_binary_classification(): + # check that a column is dropped in binary classification + X, y = load_breast_cancer(return_X_y=True) + X_train, X_test, y_train, _ = train_test_split( + scale(X), y, stratify=y, random_state=42 + ) + + # both classifiers implement 'predict_proba' and will both drop one column + estimators = [ + ("lr", LogisticRegression()), + ("rf", RandomForestClassifier(random_state=42)), + ] + clf = StackingClassifier(estimators=estimators, cv=3) + + clf.fit(X_train, y_train) + X_trans = clf.transform(X_test) + assert X_trans.shape[1] == 2 + + # LinearSVC does not implement 'predict_proba' and will not drop one column + estimators = [("lr", LogisticRegression()), ("svc", LinearSVC(dual="auto"))] + clf.set_params(estimators=estimators) + + clf.fit(X_train, y_train) + X_trans = clf.transform(X_test) + assert X_trans.shape[1] == 2 + + +def test_stacking_classifier_drop_estimator(): + # prescale the data to avoid convergence warning without using a pipeline + # for later assert + X_train, X_test, y_train, _ = train_test_split( + scale(X_iris), y_iris, stratify=y_iris, random_state=42 + ) + estimators = [("lr", "drop"), ("svc", LinearSVC(dual="auto", random_state=0))] + rf = RandomForestClassifier(n_estimators=10, random_state=42) + clf = StackingClassifier( + estimators=[("svc", LinearSVC(dual="auto", random_state=0))], + final_estimator=rf, + cv=5, + ) + clf_drop = StackingClassifier(estimators=estimators, final_estimator=rf, cv=5) + + clf.fit(X_train, y_train) + clf_drop.fit(X_train, y_train) + assert_allclose(clf.predict(X_test), clf_drop.predict(X_test)) + assert_allclose(clf.predict_proba(X_test), clf_drop.predict_proba(X_test)) + assert_allclose(clf.transform(X_test), clf_drop.transform(X_test)) + + +def test_stacking_regressor_drop_estimator(): + # prescale the data to avoid convergence warning without using a pipeline + # for later assert + X_train, X_test, y_train, _ = train_test_split( + scale(X_diabetes), y_diabetes, random_state=42 + ) + estimators = [("lr", "drop"), ("svr", LinearSVR(dual="auto", random_state=0))] + rf = RandomForestRegressor(n_estimators=10, random_state=42) + reg = StackingRegressor( + estimators=[("svr", LinearSVR(dual="auto", random_state=0))], + final_estimator=rf, + cv=5, + ) + reg_drop = StackingRegressor(estimators=estimators, final_estimator=rf, cv=5) + + reg.fit(X_train, y_train) + reg_drop.fit(X_train, y_train) + assert_allclose(reg.predict(X_test), reg_drop.predict(X_test)) + assert_allclose(reg.transform(X_test), reg_drop.transform(X_test)) + + +@pytest.mark.parametrize("cv", [3, KFold(n_splits=3, shuffle=True, random_state=42)]) +@pytest.mark.parametrize( + "final_estimator, predict_params", + [ + (None, {}), + (RandomForestRegressor(random_state=42), {}), + (DummyRegressor(), {"return_std": True}), + ], +) +@pytest.mark.parametrize("passthrough", [False, True]) +def test_stacking_regressor_diabetes(cv, final_estimator, predict_params, passthrough): + # prescale the data to avoid convergence warning without using 
a pipeline + # for later assert + X_train, X_test, y_train, _ = train_test_split( + scale(X_diabetes), y_diabetes, random_state=42 + ) + estimators = [("lr", LinearRegression()), ("svr", LinearSVR(dual="auto"))] + reg = StackingRegressor( + estimators=estimators, + final_estimator=final_estimator, + cv=cv, + passthrough=passthrough, + ) + reg.fit(X_train, y_train) + result = reg.predict(X_test, **predict_params) + expected_result_length = 2 if predict_params else 1 + if predict_params: + assert len(result) == expected_result_length + + X_trans = reg.transform(X_test) + expected_column_count = 12 if passthrough else 2 + assert X_trans.shape[1] == expected_column_count + if passthrough: + assert_allclose(X_test, X_trans[:, -10:]) + + reg.set_params(lr="drop") + reg.fit(X_train, y_train) + reg.predict(X_test) + + X_trans = reg.transform(X_test) + expected_column_count_drop = 11 if passthrough else 1 + assert X_trans.shape[1] == expected_column_count_drop + if passthrough: + assert_allclose(X_test, X_trans[:, -10:]) + + +@pytest.mark.parametrize( + "sparse_container", COO_CONTAINERS + CSC_CONTAINERS + CSR_CONTAINERS +) +def test_stacking_regressor_sparse_passthrough(sparse_container): + # Check passthrough behavior on a sparse X matrix + X_train, X_test, y_train, _ = train_test_split( + sparse_container(scale(X_diabetes)), y_diabetes, random_state=42 + ) + estimators = [("lr", LinearRegression()), ("svr", LinearSVR(dual="auto"))] + rf = RandomForestRegressor(n_estimators=10, random_state=42) + clf = StackingRegressor( + estimators=estimators, final_estimator=rf, cv=5, passthrough=True + ) + clf.fit(X_train, y_train) + X_trans = clf.transform(X_test) + assert_allclose_dense_sparse(X_test, X_trans[:, -10:]) + assert sparse.issparse(X_trans) + assert X_test.format == X_trans.format + + +@pytest.mark.parametrize( + "sparse_container", COO_CONTAINERS + CSC_CONTAINERS + CSR_CONTAINERS +) +def test_stacking_classifier_sparse_passthrough(sparse_container): + # Check passthrough behavior on a sparse X matrix + X_train, X_test, y_train, _ = train_test_split( + sparse_container(scale(X_iris)), y_iris, random_state=42 + ) + estimators = [("lr", LogisticRegression()), ("svc", LinearSVC(dual="auto"))] + rf = RandomForestClassifier(n_estimators=10, random_state=42) + clf = StackingClassifier( + estimators=estimators, final_estimator=rf, cv=5, passthrough=True + ) + clf.fit(X_train, y_train) + X_trans = clf.transform(X_test) + assert_allclose_dense_sparse(X_test, X_trans[:, -4:]) + assert sparse.issparse(X_trans) + assert X_test.format == X_trans.format + + +def test_stacking_classifier_drop_binary_prob(): + # check that classifier will drop one of the probability column for + # binary classification problem + + # Select only the 2 first classes + X_, y_ = scale(X_iris[:100]), y_iris[:100] + + estimators = [("lr", LogisticRegression()), ("rf", RandomForestClassifier())] + clf = StackingClassifier(estimators=estimators) + clf.fit(X_, y_) + X_meta = clf.transform(X_) + assert X_meta.shape[1] == 2 + + +class NoWeightRegressor(RegressorMixin, BaseEstimator): + def fit(self, X, y): + self.reg = DummyRegressor() + return self.reg.fit(X, y) + + def predict(self, X): + return np.ones(X.shape[0]) + + +class NoWeightClassifier(ClassifierMixin, BaseEstimator): + def fit(self, X, y): + self.clf = DummyClassifier(strategy="stratified") + return self.clf.fit(X, y) + + +@pytest.mark.parametrize( + "y, params, type_err, msg_err", + [ + (y_iris, {"estimators": []}, ValueError, "Invalid 'estimators' attribute,"), + ( + y_iris, 
+ { + "estimators": [ + ("lr", LogisticRegression()), + ("svm", SVC(max_iter=50_000)), + ], + "stack_method": "predict_proba", + }, + ValueError, + "does not implement the method predict_proba", + ), + ( + y_iris, + { + "estimators": [ + ("lr", LogisticRegression()), + ("cor", NoWeightClassifier()), + ] + }, + TypeError, + "does not support sample weight", + ), + ( + y_iris, + { + "estimators": [ + ("lr", LogisticRegression()), + ("cor", LinearSVC(dual="auto", max_iter=50_000)), + ], + "final_estimator": NoWeightClassifier(), + }, + TypeError, + "does not support sample weight", + ), + ], +) +def test_stacking_classifier_error(y, params, type_err, msg_err): + with pytest.raises(type_err, match=msg_err): + clf = StackingClassifier(**params, cv=3) + clf.fit(scale(X_iris), y, sample_weight=np.ones(X_iris.shape[0])) + + +@pytest.mark.parametrize( + "y, params, type_err, msg_err", + [ + (y_diabetes, {"estimators": []}, ValueError, "Invalid 'estimators' attribute,"), + ( + y_diabetes, + {"estimators": [("lr", LinearRegression()), ("cor", NoWeightRegressor())]}, + TypeError, + "does not support sample weight", + ), + ( + y_diabetes, + { + "estimators": [ + ("lr", LinearRegression()), + ("cor", LinearSVR(dual="auto")), + ], + "final_estimator": NoWeightRegressor(), + }, + TypeError, + "does not support sample weight", + ), + ], +) +def test_stacking_regressor_error(y, params, type_err, msg_err): + with pytest.raises(type_err, match=msg_err): + reg = StackingRegressor(**params, cv=3) + reg.fit(scale(X_diabetes), y, sample_weight=np.ones(X_diabetes.shape[0])) + + +@pytest.mark.parametrize( + "estimator, X, y", + [ + ( + StackingClassifier( + estimators=[ + ("lr", LogisticRegression(random_state=0)), + ("svm", LinearSVC(dual="auto", random_state=0)), + ] + ), + X_iris[:100], + y_iris[:100], + ), # keep only classes 0 and 1 + ( + StackingRegressor( + estimators=[ + ("lr", LinearRegression()), + ("svm", LinearSVR(dual="auto", random_state=0)), + ] + ), + X_diabetes, + y_diabetes, + ), + ], + ids=["StackingClassifier", "StackingRegressor"], +) +def test_stacking_randomness(estimator, X, y): + # checking that fixing the random state of the CV will lead to the same + # results + estimator_full = clone(estimator) + estimator_full.set_params( + cv=KFold(shuffle=True, random_state=np.random.RandomState(0)) + ) + + estimator_drop = clone(estimator) + estimator_drop.set_params(lr="drop") + estimator_drop.set_params( + cv=KFold(shuffle=True, random_state=np.random.RandomState(0)) + ) + + assert_allclose( + estimator_full.fit(X, y).transform(X)[:, 1:], + estimator_drop.fit(X, y).transform(X), + ) + + +def test_stacking_classifier_stratify_default(): + # check that we stratify the classes for the default CV + clf = StackingClassifier( + estimators=[ + ("lr", LogisticRegression(max_iter=10_000)), + ("svm", LinearSVC(dual="auto", max_iter=10_000)), + ] + ) + # since iris is not shuffled, a simple k-fold would not contain the + # 3 classes during training + clf.fit(X_iris, y_iris) + + +@pytest.mark.parametrize( + "stacker, X, y", + [ + ( + StackingClassifier( + estimators=[ + ("lr", LogisticRegression()), + ("svm", LinearSVC(dual="auto", random_state=42)), + ], + final_estimator=LogisticRegression(), + cv=KFold(shuffle=True, random_state=42), + ), + *load_breast_cancer(return_X_y=True), + ), + ( + StackingRegressor( + estimators=[ + ("lr", LinearRegression()), + ("svm", LinearSVR(dual="auto", random_state=42)), + ], + final_estimator=LinearRegression(), + cv=KFold(shuffle=True, random_state=42), + ), + X_diabetes, + 
y_diabetes, + ), + ], + ids=["StackingClassifier", "StackingRegressor"], +) +def test_stacking_with_sample_weight(stacker, X, y): + # check that sample weights have an influence on the fitting + # note: ConvergenceWarning is caught since we are not worried about + # convergence here + n_half_samples = len(y) // 2 + total_sample_weight = np.array( + [0.1] * n_half_samples + [0.9] * (len(y) - n_half_samples) + ) + X_train, X_test, y_train, _, sample_weight_train, _ = train_test_split( + X, y, total_sample_weight, random_state=42 + ) + + with ignore_warnings(category=ConvergenceWarning): + stacker.fit(X_train, y_train) + y_pred_no_weight = stacker.predict(X_test) + + with ignore_warnings(category=ConvergenceWarning): + stacker.fit(X_train, y_train, sample_weight=np.ones(y_train.shape)) + y_pred_unit_weight = stacker.predict(X_test) + + assert_allclose(y_pred_no_weight, y_pred_unit_weight) + + with ignore_warnings(category=ConvergenceWarning): + stacker.fit(X_train, y_train, sample_weight=sample_weight_train) + y_pred_biased = stacker.predict(X_test) + + assert np.abs(y_pred_no_weight - y_pred_biased).sum() > 0 + + +def test_stacking_classifier_sample_weight_fit_param(): + # check sample_weight is passed to all invocations of fit + stacker = StackingClassifier( + estimators=[("lr", CheckingClassifier(expected_sample_weight=True))], + final_estimator=CheckingClassifier(expected_sample_weight=True), + ) + stacker.fit(X_iris, y_iris, sample_weight=np.ones(X_iris.shape[0])) + + +@pytest.mark.filterwarnings("ignore::sklearn.exceptions.ConvergenceWarning") +@pytest.mark.parametrize( + "stacker, X, y", + [ + ( + StackingClassifier( + estimators=[ + ("lr", LogisticRegression()), + ("svm", LinearSVC(dual="auto", random_state=42)), + ], + final_estimator=LogisticRegression(), + ), + *load_breast_cancer(return_X_y=True), + ), + ( + StackingRegressor( + estimators=[ + ("lr", LinearRegression()), + ("svm", LinearSVR(dual="auto", random_state=42)), + ], + final_estimator=LinearRegression(), + ), + X_diabetes, + y_diabetes, + ), + ], + ids=["StackingClassifier", "StackingRegressor"], +) +def test_stacking_cv_influence(stacker, X, y): + # check that the stacking affects the fit of the final estimator but not + # the fit of the base estimators + # note: ConvergenceWarning is caught since we are not worried about + # convergence here + stacker_cv_3 = clone(stacker) + stacker_cv_5 = clone(stacker) + + stacker_cv_3.set_params(cv=3) + stacker_cv_5.set_params(cv=5) + + stacker_cv_3.fit(X, y) + stacker_cv_5.fit(X, y) + + # the base estimators should be identical + for est_cv_3, est_cv_5 in zip(stacker_cv_3.estimators_, stacker_cv_5.estimators_): + assert_allclose(est_cv_3.coef_, est_cv_5.coef_) + + # the final estimator should be different + with pytest.raises(AssertionError, match="Not equal"): + assert_allclose( + stacker_cv_3.final_estimator_.coef_, stacker_cv_5.final_estimator_.coef_ + ) + + +@pytest.mark.parametrize( + "Stacker, Estimator, stack_method, final_estimator, X, y", + [ + ( + StackingClassifier, + DummyClassifier, + "predict_proba", + LogisticRegression(random_state=42), + X_iris, + y_iris, + ), + ( + StackingRegressor, + DummyRegressor, + "predict", + LinearRegression(), + X_diabetes, + y_diabetes, + ), + ], +) +def test_stacking_prefit(Stacker, Estimator, stack_method, final_estimator, X, y): + """Check the behaviour of stacking when `cv='prefit'`""" + X_train1, X_train2, y_train1, y_train2 = train_test_split( + X, y, random_state=42, test_size=0.5 + ) + estimators = [ + ("d0",
Estimator().fit(X_train1, y_train1)), + ("d1", Estimator().fit(X_train1, y_train1)), + ] + + # mock out fit and stack_method to be asserted later + for _, estimator in estimators: + estimator.fit = Mock(name="fit") + stack_func = getattr(estimator, stack_method) + predict_method_mocked = Mock(side_effect=stack_func) + # Mocking a method will not provide a `__name__` while Python methods + # do and we are using it in `_get_response_method`. + predict_method_mocked.__name__ = stack_method + setattr(estimator, stack_method, predict_method_mocked) + + stacker = Stacker( + estimators=estimators, cv="prefit", final_estimator=final_estimator + ) + stacker.fit(X_train2, y_train2) + + assert stacker.estimators_ == [estimator for _, estimator in estimators] + # fit was not called again + assert all(estimator.fit.call_count == 0 for estimator in stacker.estimators_) + + # stack method is called with the proper inputs + for estimator in stacker.estimators_: + stack_func_mock = getattr(estimator, stack_method) + stack_func_mock.assert_called_with(X_train2) + + +@pytest.mark.parametrize( + "stacker, X, y", + [ + ( + StackingClassifier( + estimators=[("lr", LogisticRegression()), ("svm", SVC())], + cv="prefit", + ), + X_iris, + y_iris, + ), + ( + StackingRegressor( + estimators=[ + ("lr", LinearRegression()), + ("svm", LinearSVR(dual="auto")), + ], + cv="prefit", + ), + X_diabetes, + y_diabetes, + ), + ], +) +def test_stacking_prefit_error(stacker, X, y): + # check that NotFittedError is raised + # if base estimators are not fitted when cv="prefit" + with pytest.raises(NotFittedError): + stacker.fit(X, y) + + +@pytest.mark.parametrize( + "make_dataset, Stacking, Estimator", + [ + (make_classification, StackingClassifier, LogisticRegression), + (make_regression, StackingRegressor, LinearRegression), + ], +) +def test_stacking_without_n_features_in(make_dataset, Stacking, Estimator): + # Stacking supports estimators without `n_features_in_`. Regression test + # for #17353 + + class MyEstimator(Estimator): + """Estimator without n_features_in_""" + + def fit(self, X, y): + super().fit(X, y) + del self.n_features_in_ + + X, y = make_dataset(random_state=0, n_samples=100) + stacker = Stacking(estimators=[("lr", MyEstimator())]) + + msg = f"{Stacking.__name__} object has no attribute n_features_in_" + with pytest.raises(AttributeError, match=msg): + stacker.n_features_in_ + + # Does not raise + stacker.fit(X, y) + + msg = "'MyEstimator' object has no attribute 'n_features_in_'" + with pytest.raises(AttributeError, match=msg): + stacker.n_features_in_ + + +@pytest.mark.parametrize( + "estimator", + [ + # output a 2D array of the probability of the positive class for each output + MLPClassifier(random_state=42), + # output a list of 2D array containing the probability of each class + # for each output + RandomForestClassifier(random_state=42), + ], + ids=["MLPClassifier", "RandomForestClassifier"], +) +def test_stacking_classifier_multilabel_predict_proba(estimator): + """Check the behaviour for the multilabel classification case and the + `predict_proba` stacking method. + + Estimators are not consistent with the output arrays and we need to ensure that + we handle all cases. 
+ """ + X_train, X_test, y_train, y_test = train_test_split( + X_multilabel, y_multilabel, stratify=y_multilabel, random_state=42 + ) + n_outputs = 3 + + estimators = [("est", estimator)] + stacker = StackingClassifier( + estimators=estimators, + final_estimator=KNeighborsClassifier(), + stack_method="predict_proba", + ).fit(X_train, y_train) + + X_trans = stacker.transform(X_test) + assert X_trans.shape == (X_test.shape[0], n_outputs) + # we should not have any collinear classes and thus nothing should sum to 1 + assert not any(np.isclose(X_trans.sum(axis=1), 1.0)) + + y_pred = stacker.predict(X_test) + assert y_pred.shape == y_test.shape + + +def test_stacking_classifier_multilabel_decision_function(): + """Check the behaviour for the multilabel classification case and the + `decision_function` stacking method. Only `RidgeClassifier` supports this + case. + """ + X_train, X_test, y_train, y_test = train_test_split( + X_multilabel, y_multilabel, stratify=y_multilabel, random_state=42 + ) + n_outputs = 3 + + estimators = [("est", RidgeClassifier())] + stacker = StackingClassifier( + estimators=estimators, + final_estimator=KNeighborsClassifier(), + stack_method="decision_function", + ).fit(X_train, y_train) + + X_trans = stacker.transform(X_test) + assert X_trans.shape == (X_test.shape[0], n_outputs) + + y_pred = stacker.predict(X_test) + assert y_pred.shape == y_test.shape + + +@pytest.mark.parametrize("stack_method", ["auto", "predict"]) +@pytest.mark.parametrize("passthrough", [False, True]) +def test_stacking_classifier_multilabel_auto_predict(stack_method, passthrough): + """Check the behaviour for the multilabel classification case for stack methods + supported for all estimators or automatically picked up. + """ + X_train, X_test, y_train, y_test = train_test_split( + X_multilabel, y_multilabel, stratify=y_multilabel, random_state=42 + ) + y_train_before_fit = y_train.copy() + n_outputs = 3 + + estimators = [ + ("mlp", MLPClassifier(random_state=42)), + ("rf", RandomForestClassifier(random_state=42)), + ("ridge", RidgeClassifier()), + ] + final_estimator = KNeighborsClassifier() + + clf = StackingClassifier( + estimators=estimators, + final_estimator=final_estimator, + passthrough=passthrough, + stack_method=stack_method, + ).fit(X_train, y_train) + + # make sure we don't change `y_train` inplace + assert_array_equal(y_train_before_fit, y_train) + + y_pred = clf.predict(X_test) + assert y_pred.shape == y_test.shape + + if stack_method == "auto": + expected_stack_methods = ["predict_proba", "predict_proba", "decision_function"] + else: + expected_stack_methods = ["predict"] * len(estimators) + assert clf.stack_method_ == expected_stack_methods + + n_features_X_trans = n_outputs * len(estimators) + if passthrough: + n_features_X_trans += X_train.shape[1] + X_trans = clf.transform(X_test) + assert X_trans.shape == (X_test.shape[0], n_features_X_trans) + + assert_array_equal(clf.classes_, [np.array([0, 1])] * n_outputs) + + +@pytest.mark.parametrize( + "stacker, feature_names, X, y, expected_names", + [ + ( + StackingClassifier( + estimators=[ + ("lr", LogisticRegression(random_state=0)), + ("svm", LinearSVC(dual="auto", random_state=0)), + ] + ), + iris.feature_names, + X_iris, + y_iris, + [ + "stackingclassifier_lr0", + "stackingclassifier_lr1", + "stackingclassifier_lr2", + "stackingclassifier_svm0", + "stackingclassifier_svm1", + "stackingclassifier_svm2", + ], + ), + ( + StackingClassifier( + estimators=[ + ("lr", LogisticRegression(random_state=0)), + ("other", "drop"), + ("svm", 
LinearSVC(dual="auto", random_state=0)), + ] + ), + iris.feature_names, + X_iris[:100], + y_iris[:100], # keep only classes 0 and 1 + [ + "stackingclassifier_lr", + "stackingclassifier_svm", + ], + ), + ( + StackingRegressor( + estimators=[ + ("lr", LinearRegression()), + ("svm", LinearSVR(dual="auto", random_state=0)), + ] + ), + diabetes.feature_names, + X_diabetes, + y_diabetes, + [ + "stackingregressor_lr", + "stackingregressor_svm", + ], + ), + ], + ids=[ + "StackingClassifier_multiclass", + "StackingClassifier_binary", + "StackingRegressor", + ], +) +@pytest.mark.parametrize("passthrough", [True, False]) +def test_get_feature_names_out( + stacker, feature_names, X, y, expected_names, passthrough +): + """Check get_feature_names_out works for stacking.""" + + stacker.set_params(passthrough=passthrough) + stacker.fit(scale(X), y) + + if passthrough: + expected_names = np.concatenate((expected_names, feature_names)) + + names_out = stacker.get_feature_names_out(feature_names) + assert_array_equal(names_out, expected_names) + + +def test_stacking_classifier_base_regressor(): + """Check that a regressor can be used as the first layer in `StackingClassifier`.""" + X_train, X_test, y_train, y_test = train_test_split( + scale(X_iris), y_iris, stratify=y_iris, random_state=42 + ) + clf = StackingClassifier(estimators=[("ridge", Ridge())]) + clf.fit(X_train, y_train) + clf.predict(X_test) + clf.predict_proba(X_test) + assert clf.score(X_test, y_test) > 0.8 + + +def test_stacking_final_estimator_attribute_error(): + """Check that we raise the proper AttributeError when the final estimator + does not implement the `decision_function` method, which is decorated with + `available_if`. + + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/28108 + """ + X, y = make_classification(random_state=42) + + estimators = [ + ("lr", LogisticRegression()), + ("rf", RandomForestClassifier(n_estimators=2, random_state=42)), + ] + # RandomForestClassifier does not implement 'decision_function' and should raise + # an AttributeError + final_estimator = RandomForestClassifier(n_estimators=2, random_state=42) + clf = StackingClassifier( + estimators=estimators, final_estimator=final_estimator, cv=3 + ) + + outer_msg = "This 'StackingClassifier' has no attribute 'decision_function'" + inner_msg = "'RandomForestClassifier' object has no attribute 'decision_function'" + with pytest.raises(AttributeError, match=outer_msg) as exec_info: + clf.fit(X, y).decision_function(X) + assert isinstance(exec_info.value.__cause__, AttributeError) + assert inner_msg in str(exec_info.value.__cause__) diff --git a/venv/lib/python3.10/site-packages/sklearn/ensemble/tests/test_voting.py b/venv/lib/python3.10/site-packages/sklearn/ensemble/tests/test_voting.py new file mode 100644 index 0000000000000000000000000000000000000000..011d9b40077e1cb6c5ff691a13b3feab6a3d01df --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/ensemble/tests/test_voting.py @@ -0,0 +1,684 @@ +"""Testing for the VotingClassifier and VotingRegressor""" + +import re + +import numpy as np +import pytest + +from sklearn import datasets +from sklearn.base import BaseEstimator, ClassifierMixin, clone +from sklearn.datasets import make_multilabel_classification +from sklearn.dummy import DummyRegressor +from sklearn.ensemble import ( + RandomForestClassifier, + RandomForestRegressor, + VotingClassifier, + VotingRegressor, +) +from sklearn.exceptions import NotFittedError +from sklearn.linear_model import LinearRegression, 
LogisticRegression +from sklearn.model_selection import GridSearchCV, cross_val_score, train_test_split +from sklearn.multiclass import OneVsRestClassifier +from sklearn.naive_bayes import GaussianNB +from sklearn.neighbors import KNeighborsClassifier +from sklearn.preprocessing import StandardScaler +from sklearn.svm import SVC +from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor +from sklearn.utils._testing import ( + _convert_container, + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, +) + +# Load datasets +iris = datasets.load_iris() +X, y = iris.data[:, 1:3], iris.target +# Scaled to solve ConvergenceWarning throw by Logistic Regression +X_scaled = StandardScaler().fit_transform(X) + +X_r, y_r = datasets.load_diabetes(return_X_y=True) + + +@pytest.mark.parametrize( + "params, err_msg", + [ + ( + {"estimators": []}, + "Invalid 'estimators' attribute, 'estimators' should be a non-empty list", + ), + ( + {"estimators": [("lr", LogisticRegression())], "weights": [1, 2]}, + "Number of `estimators` and weights must be equal", + ), + ], +) +def test_voting_classifier_estimator_init(params, err_msg): + ensemble = VotingClassifier(**params) + with pytest.raises(ValueError, match=err_msg): + ensemble.fit(X, y) + + +def test_predictproba_hardvoting(): + eclf = VotingClassifier( + estimators=[("lr1", LogisticRegression()), ("lr2", LogisticRegression())], + voting="hard", + ) + + inner_msg = "predict_proba is not available when voting='hard'" + outer_msg = "'VotingClassifier' has no attribute 'predict_proba'" + with pytest.raises(AttributeError, match=outer_msg) as exec_info: + eclf.predict_proba + assert isinstance(exec_info.value.__cause__, AttributeError) + assert inner_msg in str(exec_info.value.__cause__) + + assert not hasattr(eclf, "predict_proba") + eclf.fit(X_scaled, y) + assert not hasattr(eclf, "predict_proba") + + +def test_notfitted(): + eclf = VotingClassifier( + estimators=[("lr1", LogisticRegression()), ("lr2", LogisticRegression())], + voting="soft", + ) + ereg = VotingRegressor([("dr", DummyRegressor())]) + msg = ( + "This %s instance is not fitted yet. Call 'fit'" + " with appropriate arguments before using this estimator." 
+ ) + with pytest.raises(NotFittedError, match=msg % "VotingClassifier"): + eclf.predict(X) + with pytest.raises(NotFittedError, match=msg % "VotingClassifier"): + eclf.predict_proba(X) + with pytest.raises(NotFittedError, match=msg % "VotingClassifier"): + eclf.transform(X) + with pytest.raises(NotFittedError, match=msg % "VotingRegressor"): + ereg.predict(X_r) + with pytest.raises(NotFittedError, match=msg % "VotingRegressor"): + ereg.transform(X_r) + + +def test_majority_label_iris(global_random_seed): + """Check classification by majority label on dataset iris.""" + clf1 = LogisticRegression(solver="liblinear", random_state=global_random_seed) + clf2 = RandomForestClassifier(n_estimators=10, random_state=global_random_seed) + clf3 = GaussianNB() + eclf = VotingClassifier( + estimators=[("lr", clf1), ("rf", clf2), ("gnb", clf3)], voting="hard" + ) + scores = cross_val_score(eclf, X, y, scoring="accuracy") + + assert scores.mean() >= 0.9 + + +def test_tie_situation(): + """Check voting classifier selects smaller class label in tie situation.""" + clf1 = LogisticRegression(random_state=123, solver="liblinear") + clf2 = RandomForestClassifier(random_state=123) + eclf = VotingClassifier(estimators=[("lr", clf1), ("rf", clf2)], voting="hard") + assert clf1.fit(X, y).predict(X)[73] == 2 + assert clf2.fit(X, y).predict(X)[73] == 1 + assert eclf.fit(X, y).predict(X)[73] == 1 + + +def test_weights_iris(global_random_seed): + """Check classification by average probabilities on dataset iris.""" + clf1 = LogisticRegression(random_state=global_random_seed) + clf2 = RandomForestClassifier(n_estimators=10, random_state=global_random_seed) + clf3 = GaussianNB() + eclf = VotingClassifier( + estimators=[("lr", clf1), ("rf", clf2), ("gnb", clf3)], + voting="soft", + weights=[1, 2, 10], + ) + scores = cross_val_score(eclf, X_scaled, y, scoring="accuracy") + assert scores.mean() >= 0.9 + + +def test_weights_regressor(): + """Check weighted average regression prediction on diabetes dataset.""" + reg1 = DummyRegressor(strategy="mean") + reg2 = DummyRegressor(strategy="median") + reg3 = DummyRegressor(strategy="quantile", quantile=0.2) + ereg = VotingRegressor( + [("mean", reg1), ("median", reg2), ("quantile", reg3)], weights=[1, 2, 10] + ) + + X_r_train, X_r_test, y_r_train, y_r_test = train_test_split( + X_r, y_r, test_size=0.25 + ) + + reg1_pred = reg1.fit(X_r_train, y_r_train).predict(X_r_test) + reg2_pred = reg2.fit(X_r_train, y_r_train).predict(X_r_test) + reg3_pred = reg3.fit(X_r_train, y_r_train).predict(X_r_test) + ereg_pred = ereg.fit(X_r_train, y_r_train).predict(X_r_test) + + avg = np.average( + np.asarray([reg1_pred, reg2_pred, reg3_pred]), axis=0, weights=[1, 2, 10] + ) + assert_almost_equal(ereg_pred, avg, decimal=2) + + ereg_weights_none = VotingRegressor( + [("mean", reg1), ("median", reg2), ("quantile", reg3)], weights=None + ) + ereg_weights_equal = VotingRegressor( + [("mean", reg1), ("median", reg2), ("quantile", reg3)], weights=[1, 1, 1] + ) + ereg_weights_none.fit(X_r_train, y_r_train) + ereg_weights_equal.fit(X_r_train, y_r_train) + ereg_none_pred = ereg_weights_none.predict(X_r_test) + ereg_equal_pred = ereg_weights_equal.predict(X_r_test) + assert_almost_equal(ereg_none_pred, ereg_equal_pred, decimal=2) + + +def test_predict_on_toy_problem(global_random_seed): + """Manually check predicted class labels for toy dataset.""" + clf1 = LogisticRegression(random_state=global_random_seed) + clf2 = RandomForestClassifier(n_estimators=10, random_state=global_random_seed) + clf3 = GaussianNB() 
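As a minimal standalone sketch (an editorial illustration, not part of the diffed test file) of the voting arithmetic that the toy-problem tests below verify: soft voting averages the base classifiers' predict_proba outputs with the given weights before taking an argmax, while hard voting takes a majority over the predicted labels. The numeric values and the weights [2, 1, 1] mirror the later test_predict_proba_on_toy_problem but are otherwise invented.

    import numpy as np

    # Invented per-class probabilities from three base classifiers for one sample.
    p_lr = np.array([0.6, 0.4])
    p_rf = np.array([0.8, 0.2])
    p_gnb = np.array([0.9, 0.1])

    # Soft voting: weighted average of the probability rows, then argmax.
    weights = np.array([2, 1, 1])
    p_soft = np.average(np.vstack([p_lr, p_rf, p_gnb]), axis=0, weights=weights)
    soft_label = int(np.argmax(p_soft))  # -> 0 for these made-up numbers

    # Hard voting: majority vote over the labels each classifier predicts.
    hard_votes = np.array([0, 0, 1])  # invented predict() outputs
    hard_label = int(np.bincount(hard_votes, minlength=2).argmax())  # -> 0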
+ + X = np.array( + [[-1.1, -1.5], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2], [2.1, 1.4], [3.1, 2.3]] + ) + + y = np.array([1, 1, 1, 2, 2, 2]) + + assert_array_equal(clf1.fit(X, y).predict(X), [1, 1, 1, 2, 2, 2]) + assert_array_equal(clf2.fit(X, y).predict(X), [1, 1, 1, 2, 2, 2]) + assert_array_equal(clf3.fit(X, y).predict(X), [1, 1, 1, 2, 2, 2]) + + eclf = VotingClassifier( + estimators=[("lr", clf1), ("rf", clf2), ("gnb", clf3)], + voting="hard", + weights=[1, 1, 1], + ) + assert_array_equal(eclf.fit(X, y).predict(X), [1, 1, 1, 2, 2, 2]) + + eclf = VotingClassifier( + estimators=[("lr", clf1), ("rf", clf2), ("gnb", clf3)], + voting="soft", + weights=[1, 1, 1], + ) + assert_array_equal(eclf.fit(X, y).predict(X), [1, 1, 1, 2, 2, 2]) + + +def test_predict_proba_on_toy_problem(): + """Calculate predicted probabilities on toy dataset.""" + clf1 = LogisticRegression(random_state=123) + clf2 = RandomForestClassifier(random_state=123) + clf3 = GaussianNB() + X = np.array([[-1.1, -1.5], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]]) + y = np.array([1, 1, 2, 2]) + + clf1_res = np.array( + [ + [0.59790391, 0.40209609], + [0.57622162, 0.42377838], + [0.50728456, 0.49271544], + [0.40241774, 0.59758226], + ] + ) + + clf2_res = np.array([[0.8, 0.2], [0.8, 0.2], [0.2, 0.8], [0.3, 0.7]]) + + clf3_res = np.array( + [[0.9985082, 0.0014918], [0.99845843, 0.00154157], [0.0, 1.0], [0.0, 1.0]] + ) + + t00 = (2 * clf1_res[0][0] + clf2_res[0][0] + clf3_res[0][0]) / 4 + t11 = (2 * clf1_res[1][1] + clf2_res[1][1] + clf3_res[1][1]) / 4 + t21 = (2 * clf1_res[2][1] + clf2_res[2][1] + clf3_res[2][1]) / 4 + t31 = (2 * clf1_res[3][1] + clf2_res[3][1] + clf3_res[3][1]) / 4 + + eclf = VotingClassifier( + estimators=[("lr", clf1), ("rf", clf2), ("gnb", clf3)], + voting="soft", + weights=[2, 1, 1], + ) + eclf_res = eclf.fit(X, y).predict_proba(X) + + assert_almost_equal(t00, eclf_res[0][0], decimal=1) + assert_almost_equal(t11, eclf_res[1][1], decimal=1) + assert_almost_equal(t21, eclf_res[2][1], decimal=1) + assert_almost_equal(t31, eclf_res[3][1], decimal=1) + + inner_msg = "predict_proba is not available when voting='hard'" + outer_msg = "'VotingClassifier' has no attribute 'predict_proba'" + with pytest.raises(AttributeError, match=outer_msg) as exec_info: + eclf = VotingClassifier( + estimators=[("lr", clf1), ("rf", clf2), ("gnb", clf3)], voting="hard" + ) + eclf.fit(X, y).predict_proba(X) + + assert isinstance(exec_info.value.__cause__, AttributeError) + assert inner_msg in str(exec_info.value.__cause__) + + +@pytest.mark.parametrize("container_type", ["list", "array", "dataframe"]) +def test_multilabel(container_type): + """Check if error is raised for multilabel classification.""" + X, y = make_multilabel_classification( + n_classes=2, n_labels=1, allow_unlabeled=False, random_state=123 + ) + y = _convert_container(y, container_type) + clf = OneVsRestClassifier(SVC(kernel="linear")) + + eclf = VotingClassifier(estimators=[("ovr", clf)], voting="hard") + err_msg = "only supports binary or multiclass classification" + with pytest.raises(NotImplementedError, match=err_msg): + eclf.fit(X, y) + + +def test_gridsearch(): + """Check GridSearch support.""" + clf1 = LogisticRegression(random_state=1) + clf2 = RandomForestClassifier(random_state=1, n_estimators=3) + clf3 = GaussianNB() + eclf = VotingClassifier( + estimators=[("lr", clf1), ("rf", clf2), ("gnb", clf3)], voting="soft" + ) + + params = { + "lr__C": [1.0, 100.0], + "voting": ["soft", "hard"], + "weights": [[0.5, 0.5, 0.5], [1.0, 0.5, 0.5]], + } + + grid = 
GridSearchCV(estimator=eclf, param_grid=params, cv=2) + grid.fit(X_scaled, y) + + +def test_parallel_fit(global_random_seed): + """Check parallel backend of VotingClassifier on toy dataset.""" + clf1 = LogisticRegression(random_state=global_random_seed) + clf2 = RandomForestClassifier(n_estimators=10, random_state=global_random_seed) + clf3 = GaussianNB() + X = np.array([[-1.1, -1.5], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]]) + y = np.array([1, 1, 2, 2]) + + eclf1 = VotingClassifier( + estimators=[("lr", clf1), ("rf", clf2), ("gnb", clf3)], voting="soft", n_jobs=1 + ).fit(X, y) + eclf2 = VotingClassifier( + estimators=[("lr", clf1), ("rf", clf2), ("gnb", clf3)], voting="soft", n_jobs=2 + ).fit(X, y) + + assert_array_equal(eclf1.predict(X), eclf2.predict(X)) + assert_array_almost_equal(eclf1.predict_proba(X), eclf2.predict_proba(X)) + + +def test_sample_weight(global_random_seed): + """Tests sample_weight parameter of VotingClassifier""" + clf1 = LogisticRegression(random_state=global_random_seed) + clf2 = RandomForestClassifier(n_estimators=10, random_state=global_random_seed) + clf3 = SVC(probability=True, random_state=global_random_seed) + eclf1 = VotingClassifier( + estimators=[("lr", clf1), ("rf", clf2), ("svc", clf3)], voting="soft" + ).fit(X_scaled, y, sample_weight=np.ones((len(y),))) + eclf2 = VotingClassifier( + estimators=[("lr", clf1), ("rf", clf2), ("svc", clf3)], voting="soft" + ).fit(X_scaled, y) + assert_array_equal(eclf1.predict(X_scaled), eclf2.predict(X_scaled)) + assert_array_almost_equal( + eclf1.predict_proba(X_scaled), eclf2.predict_proba(X_scaled) + ) + sample_weight = np.random.RandomState(global_random_seed).uniform(size=(len(y),)) + eclf3 = VotingClassifier(estimators=[("lr", clf1)], voting="soft") + eclf3.fit(X_scaled, y, sample_weight) + clf1.fit(X_scaled, y, sample_weight) + assert_array_equal(eclf3.predict(X_scaled), clf1.predict(X_scaled)) + assert_array_almost_equal( + eclf3.predict_proba(X_scaled), clf1.predict_proba(X_scaled) + ) + + # check that an error is raised and indicative if sample_weight is not + # supported. + clf4 = KNeighborsClassifier() + eclf3 = VotingClassifier( + estimators=[("lr", clf1), ("svc", clf3), ("knn", clf4)], voting="soft" + ) + msg = "Underlying estimator KNeighborsClassifier does not support sample weights." + with pytest.raises(TypeError, match=msg): + eclf3.fit(X_scaled, y, sample_weight) + + # check that _fit_single_estimator will raise the right error + # it should raise the original error if this is not linked to sample_weight + class ClassifierErrorFit(ClassifierMixin, BaseEstimator): + def fit(self, X_scaled, y, sample_weight): + raise TypeError("Error unrelated to sample_weight.") + + clf = ClassifierErrorFit() + with pytest.raises(TypeError, match="Error unrelated to sample_weight"): + clf.fit(X_scaled, y, sample_weight=sample_weight) + + +def test_sample_weight_kwargs(): + """Check that VotingClassifier passes sample_weight as kwargs""" + + class MockClassifier(ClassifierMixin, BaseEstimator): + """Mock Classifier to check that sample_weight is received as kwargs""" + + def fit(self, X, y, *args, **sample_weight): + assert "sample_weight" in sample_weight + + clf = MockClassifier() + eclf = VotingClassifier(estimators=[("mock", clf)], voting="soft") + + # Should not raise an error. 
+ eclf.fit(X, y, sample_weight=np.ones((len(y),))) + + +def test_voting_classifier_set_params(global_random_seed): + # check equivalence in the output when setting underlying estimators + clf1 = LogisticRegression(random_state=global_random_seed) + clf2 = RandomForestClassifier( + n_estimators=10, random_state=global_random_seed, max_depth=None + ) + clf3 = GaussianNB() + + eclf1 = VotingClassifier( + [("lr", clf1), ("rf", clf2)], voting="soft", weights=[1, 2] + ).fit(X_scaled, y) + eclf2 = VotingClassifier( + [("lr", clf1), ("nb", clf3)], voting="soft", weights=[1, 2] + ) + eclf2.set_params(nb=clf2).fit(X_scaled, y) + + assert_array_equal(eclf1.predict(X_scaled), eclf2.predict(X_scaled)) + assert_array_almost_equal( + eclf1.predict_proba(X_scaled), eclf2.predict_proba(X_scaled) + ) + assert eclf2.estimators[0][1].get_params() == clf1.get_params() + assert eclf2.estimators[1][1].get_params() == clf2.get_params() + + +def test_set_estimator_drop(): + # VotingClassifier set_params should be able to set estimators as drop + # Test predict + clf1 = LogisticRegression(random_state=123) + clf2 = RandomForestClassifier(n_estimators=10, random_state=123) + clf3 = GaussianNB() + eclf1 = VotingClassifier( + estimators=[("lr", clf1), ("rf", clf2), ("nb", clf3)], + voting="hard", + weights=[1, 0, 0.5], + ).fit(X, y) + + eclf2 = VotingClassifier( + estimators=[("lr", clf1), ("rf", clf2), ("nb", clf3)], + voting="hard", + weights=[1, 1, 0.5], + ) + eclf2.set_params(rf="drop").fit(X, y) + + assert_array_equal(eclf1.predict(X), eclf2.predict(X)) + + assert dict(eclf2.estimators)["rf"] == "drop" + assert len(eclf2.estimators_) == 2 + assert all( + isinstance(est, (LogisticRegression, GaussianNB)) for est in eclf2.estimators_ + ) + assert eclf2.get_params()["rf"] == "drop" + + eclf1.set_params(voting="soft").fit(X, y) + eclf2.set_params(voting="soft").fit(X, y) + + assert_array_equal(eclf1.predict(X), eclf2.predict(X)) + assert_array_almost_equal(eclf1.predict_proba(X), eclf2.predict_proba(X)) + msg = "All estimators are dropped. 
At least one is required" + with pytest.raises(ValueError, match=msg): + eclf2.set_params(lr="drop", rf="drop", nb="drop").fit(X, y) + + # Test soft voting transform + X1 = np.array([[1], [2]]) + y1 = np.array([1, 2]) + eclf1 = VotingClassifier( + estimators=[("rf", clf2), ("nb", clf3)], + voting="soft", + weights=[0, 0.5], + flatten_transform=False, + ).fit(X1, y1) + + eclf2 = VotingClassifier( + estimators=[("rf", clf2), ("nb", clf3)], + voting="soft", + weights=[1, 0.5], + flatten_transform=False, + ) + eclf2.set_params(rf="drop").fit(X1, y1) + assert_array_almost_equal( + eclf1.transform(X1), + np.array([[[0.7, 0.3], [0.3, 0.7]], [[1.0, 0.0], [0.0, 1.0]]]), + ) + assert_array_almost_equal(eclf2.transform(X1), np.array([[[1.0, 0.0], [0.0, 1.0]]])) + eclf1.set_params(voting="hard") + eclf2.set_params(voting="hard") + assert_array_equal(eclf1.transform(X1), np.array([[0, 0], [1, 1]])) + assert_array_equal(eclf2.transform(X1), np.array([[0], [1]])) + + +def test_estimator_weights_format(global_random_seed): + # Test estimator weights inputs as list and array + clf1 = LogisticRegression(random_state=global_random_seed) + clf2 = RandomForestClassifier(n_estimators=10, random_state=global_random_seed) + eclf1 = VotingClassifier( + estimators=[("lr", clf1), ("rf", clf2)], weights=[1, 2], voting="soft" + ) + eclf2 = VotingClassifier( + estimators=[("lr", clf1), ("rf", clf2)], weights=np.array((1, 2)), voting="soft" + ) + eclf1.fit(X_scaled, y) + eclf2.fit(X_scaled, y) + assert_array_almost_equal( + eclf1.predict_proba(X_scaled), eclf2.predict_proba(X_scaled) + ) + + +def test_transform(global_random_seed): + """Check transform method of VotingClassifier on toy dataset.""" + clf1 = LogisticRegression(random_state=global_random_seed) + clf2 = RandomForestClassifier(n_estimators=10, random_state=global_random_seed) + clf3 = GaussianNB() + X = np.array([[-1.1, -1.5], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]]) + y = np.array([1, 1, 2, 2]) + + eclf1 = VotingClassifier( + estimators=[("lr", clf1), ("rf", clf2), ("gnb", clf3)], voting="soft" + ).fit(X, y) + eclf2 = VotingClassifier( + estimators=[("lr", clf1), ("rf", clf2), ("gnb", clf3)], + voting="soft", + flatten_transform=True, + ).fit(X, y) + eclf3 = VotingClassifier( + estimators=[("lr", clf1), ("rf", clf2), ("gnb", clf3)], + voting="soft", + flatten_transform=False, + ).fit(X, y) + + assert_array_equal(eclf1.transform(X).shape, (4, 6)) + assert_array_equal(eclf2.transform(X).shape, (4, 6)) + assert_array_equal(eclf3.transform(X).shape, (3, 4, 2)) + assert_array_almost_equal(eclf1.transform(X), eclf2.transform(X)) + assert_array_almost_equal( + eclf3.transform(X).swapaxes(0, 1).reshape((4, 6)), eclf2.transform(X) + ) + + +@pytest.mark.parametrize( + "X, y, voter", + [ + ( + X, + y, + VotingClassifier( + [ + ("lr", LogisticRegression()), + ("rf", RandomForestClassifier(n_estimators=5)), + ] + ), + ), + ( + X_r, + y_r, + VotingRegressor( + [ + ("lr", LinearRegression()), + ("rf", RandomForestRegressor(n_estimators=5)), + ] + ), + ), + ], +) +def test_none_estimator_with_weights(X, y, voter): + # check that an estimator can be set to 'drop' and passing some weight + # regression test for + # https://github.com/scikit-learn/scikit-learn/issues/13777 + voter = clone(voter) + # Scaled to solve ConvergenceWarning throw by Logistic Regression + X_scaled = StandardScaler().fit_transform(X) + voter.fit(X_scaled, y, sample_weight=np.ones(y.shape)) + voter.set_params(lr="drop") + voter.fit(X_scaled, y, sample_weight=np.ones(y.shape)) + y_pred = 
voter.predict(X_scaled) + assert y_pred.shape == y.shape + + +@pytest.mark.parametrize( + "est", + [ + VotingRegressor( + estimators=[ + ("lr", LinearRegression()), + ("tree", DecisionTreeRegressor(random_state=0)), + ] + ), + VotingClassifier( + estimators=[ + ("lr", LogisticRegression(random_state=0)), + ("tree", DecisionTreeClassifier(random_state=0)), + ] + ), + ], + ids=["VotingRegressor", "VotingClassifier"], +) +def test_n_features_in(est): + X = [[1, 2], [3, 4], [5, 6]] + y = [0, 1, 2] + + assert not hasattr(est, "n_features_in_") + est.fit(X, y) + assert est.n_features_in_ == 2 + + +@pytest.mark.parametrize( + "estimator", + [ + VotingRegressor( + estimators=[ + ("lr", LinearRegression()), + ("rf", RandomForestRegressor(random_state=123)), + ], + verbose=True, + ), + VotingClassifier( + estimators=[ + ("lr", LogisticRegression(random_state=123)), + ("rf", RandomForestClassifier(random_state=123)), + ], + verbose=True, + ), + ], +) +def test_voting_verbose(estimator, capsys): + X = np.array([[-1.1, -1.5], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]]) + y = np.array([1, 1, 2, 2]) + + pattern = ( + r"\[Voting\].*\(1 of 2\) Processing lr, total=.*\n" + r"\[Voting\].*\(2 of 2\) Processing rf, total=.*\n$" + ) + + estimator.fit(X, y) + assert re.match(pattern, capsys.readouterr()[0]) + + +def test_get_features_names_out_regressor(): + """Check get_feature_names_out output for regressor.""" + + X = [[1, 2], [3, 4], [5, 6]] + y = [0, 1, 2] + + voting = VotingRegressor( + estimators=[ + ("lr", LinearRegression()), + ("tree", DecisionTreeRegressor(random_state=0)), + ("ignore", "drop"), + ] + ) + voting.fit(X, y) + + names_out = voting.get_feature_names_out() + expected_names = ["votingregressor_lr", "votingregressor_tree"] + assert_array_equal(names_out, expected_names) + + +@pytest.mark.parametrize( + "kwargs, expected_names", + [ + ( + {"voting": "soft", "flatten_transform": True}, + [ + "votingclassifier_lr0", + "votingclassifier_lr1", + "votingclassifier_lr2", + "votingclassifier_tree0", + "votingclassifier_tree1", + "votingclassifier_tree2", + ], + ), + ({"voting": "hard"}, ["votingclassifier_lr", "votingclassifier_tree"]), + ], +) +def test_get_features_names_out_classifier(kwargs, expected_names): + """Check get_feature_names_out for classifier for different settings.""" + X = [[1, 2], [3, 4], [5, 6], [1, 1.2]] + y = [0, 1, 2, 0] + + voting = VotingClassifier( + estimators=[ + ("lr", LogisticRegression(random_state=0)), + ("tree", DecisionTreeClassifier(random_state=0)), + ], + **kwargs, + ) + voting.fit(X, y) + X_trans = voting.transform(X) + names_out = voting.get_feature_names_out() + + assert X_trans.shape[1] == len(expected_names) + assert_array_equal(names_out, expected_names) + + +def test_get_features_names_out_classifier_error(): + """Check that error is raised when voting="soft" and flatten_transform=False.""" + X = [[1, 2], [3, 4], [5, 6]] + y = [0, 1, 2] + + voting = VotingClassifier( + estimators=[ + ("lr", LogisticRegression(random_state=0)), + ("tree", DecisionTreeClassifier(random_state=0)), + ], + voting="soft", + flatten_transform=False, + ) + voting.fit(X, y) + + msg = ( + "get_feature_names_out is not supported when `voting='soft'` and " + "`flatten_transform=False`" + ) + with pytest.raises(ValueError, match=msg): + voting.get_feature_names_out() diff --git a/venv/lib/python3.10/site-packages/sklearn/ensemble/tests/test_weight_boosting.py b/venv/lib/python3.10/site-packages/sklearn/ensemble/tests/test_weight_boosting.py new file mode 100644 index 
0000000000000000000000000000000000000000..251139de62940b91cd21eec0a44bb9de865d2457 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/ensemble/tests/test_weight_boosting.py @@ -0,0 +1,705 @@ +"""Testing for the boost module (sklearn.ensemble.boost).""" + +import re + +import numpy as np +import pytest + +from sklearn import datasets +from sklearn.base import BaseEstimator, clone +from sklearn.dummy import DummyClassifier, DummyRegressor +from sklearn.ensemble import AdaBoostClassifier, AdaBoostRegressor +from sklearn.ensemble._weight_boosting import _samme_proba +from sklearn.linear_model import LinearRegression +from sklearn.model_selection import GridSearchCV, train_test_split +from sklearn.svm import SVC, SVR +from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor +from sklearn.utils import shuffle +from sklearn.utils._mocking import NoSampleWeightWrapper +from sklearn.utils._testing import ( + assert_allclose, + assert_array_almost_equal, + assert_array_equal, + assert_array_less, +) +from sklearn.utils.fixes import ( + COO_CONTAINERS, + CSC_CONTAINERS, + CSR_CONTAINERS, + DOK_CONTAINERS, + LIL_CONTAINERS, +) + +# Common random state +rng = np.random.RandomState(0) + +# Toy sample +X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]] +y_class = ["foo", "foo", "foo", 1, 1, 1] # test string class labels +y_regr = [-1, -1, -1, 1, 1, 1] +T = [[-1, -1], [2, 2], [3, 2]] +y_t_class = ["foo", 1, 1] +y_t_regr = [-1, 1, 1] + +# Load the iris dataset and randomly permute it +iris = datasets.load_iris() +perm = rng.permutation(iris.target.size) +iris.data, iris.target = shuffle(iris.data, iris.target, random_state=rng) + +# Load the diabetes dataset and randomly permute it +diabetes = datasets.load_diabetes() +diabetes.data, diabetes.target = shuffle( + diabetes.data, diabetes.target, random_state=rng +) + + +def test_samme_proba(): + # Test the `_samme_proba` helper function. + + # Define some example (bad) `predict_proba` output. + probs = np.array( + [[1, 1e-6, 0], [0.19, 0.6, 0.2], [-999, 0.51, 0.5], [1e-6, 1, 1e-9]] + ) + probs /= np.abs(probs.sum(axis=1))[:, np.newaxis] + + # _samme_proba calls estimator.predict_proba. + # Make a mock object so I can control what gets returned. + class MockEstimator: + def predict_proba(self, X): + assert_array_equal(X.shape, probs.shape) + return probs + + mock = MockEstimator() + + samme_proba = _samme_proba(mock, 3, np.ones_like(probs)) + + assert_array_equal(samme_proba.shape, probs.shape) + assert np.isfinite(samme_proba).all() + + # Make sure that the correct elements come out as smallest -- + # `_samme_proba` should preserve the ordering in each example. + assert_array_equal(np.argmin(samme_proba, axis=1), [2, 0, 0, 2]) + assert_array_equal(np.argmax(samme_proba, axis=1), [0, 1, 1, 1]) + + +def test_oneclass_adaboost_proba(): + # Test predict_proba robustness for one class label input. + # In response to issue #7501 + # https://github.com/scikit-learn/scikit-learn/issues/7501 + y_t = np.ones(len(X)) + clf = AdaBoostClassifier(algorithm="SAMME").fit(X, y_t) + assert_array_almost_equal(clf.predict_proba(X), np.ones((len(X), 1))) + + +# TODO(1.6): remove "@pytest.mark.filterwarnings" as SAMME.R will be removed +# and substituted with the SAMME algorithm as a default; also re-write test to +# only consider "SAMME" +@pytest.mark.filterwarnings("ignore:The SAMME.R algorithm") +@pytest.mark.parametrize("algorithm", ["SAMME", "SAMME.R"]) +def test_classification_toy(algorithm): + # Check classification on a toy dataset. 
+ clf = AdaBoostClassifier(algorithm=algorithm, random_state=0) + clf.fit(X, y_class) + assert_array_equal(clf.predict(T), y_t_class) + assert_array_equal(np.unique(np.asarray(y_t_class)), clf.classes_) + assert clf.predict_proba(T).shape == (len(T), 2) + assert clf.decision_function(T).shape == (len(T),) + + +def test_regression_toy(): + # Check regression on a toy dataset. + clf = AdaBoostRegressor(random_state=0) + clf.fit(X, y_regr) + assert_array_equal(clf.predict(T), y_t_regr) + + +# TODO(1.6): remove "@pytest.mark.filterwarnings" as SAMME.R will be removed +# and substituted with the SAMME algorithm as a default; also re-write test to +# only consider "SAMME" +@pytest.mark.filterwarnings("ignore:The SAMME.R algorithm") +def test_iris(): + # Check consistency on dataset iris. + classes = np.unique(iris.target) + clf_samme = prob_samme = None + + for alg in ["SAMME", "SAMME.R"]: + clf = AdaBoostClassifier(algorithm=alg) + clf.fit(iris.data, iris.target) + + assert_array_equal(classes, clf.classes_) + proba = clf.predict_proba(iris.data) + if alg == "SAMME": + clf_samme = clf + prob_samme = proba + assert proba.shape[1] == len(classes) + assert clf.decision_function(iris.data).shape[1] == len(classes) + + score = clf.score(iris.data, iris.target) + assert score > 0.9, "Failed with algorithm %s and score = %f" % (alg, score) + + # Check we used multiple estimators + assert len(clf.estimators_) > 1 + # Check for distinct random states (see issue #7408) + assert len(set(est.random_state for est in clf.estimators_)) == len( + clf.estimators_ + ) + + # Somewhat hacky regression test: prior to + # ae7adc880d624615a34bafdb1d75ef67051b8200, + # predict_proba returned SAMME.R values for SAMME. + clf_samme.algorithm = "SAMME.R" + assert_array_less(0, np.abs(clf_samme.predict_proba(iris.data) - prob_samme)) + + +@pytest.mark.parametrize("loss", ["linear", "square", "exponential"]) +def test_diabetes(loss): + # Check consistency on dataset diabetes. + reg = AdaBoostRegressor(loss=loss, random_state=0) + reg.fit(diabetes.data, diabetes.target) + score = reg.score(diabetes.data, diabetes.target) + assert score > 0.55 + + # Check we used multiple estimators + assert len(reg.estimators_) > 1 + # Check for distinct random states (see issue #7408) + assert len(set(est.random_state for est in reg.estimators_)) == len(reg.estimators_) + + +# TODO(1.6): remove "@pytest.mark.filterwarnings" as SAMME.R will be removed +# and substituted with the SAMME algorithm as a default; also re-write test to +# only consider "SAMME" +@pytest.mark.filterwarnings("ignore:The SAMME.R algorithm") +@pytest.mark.parametrize("algorithm", ["SAMME", "SAMME.R"]) +def test_staged_predict(algorithm): + # Check staged predictions.
+ rng = np.random.RandomState(0) + iris_weights = rng.randint(10, size=iris.target.shape) + diabetes_weights = rng.randint(10, size=diabetes.target.shape) + + clf = AdaBoostClassifier(algorithm=algorithm, n_estimators=10) + clf.fit(iris.data, iris.target, sample_weight=iris_weights) + + predictions = clf.predict(iris.data) + staged_predictions = [p for p in clf.staged_predict(iris.data)] + proba = clf.predict_proba(iris.data) + staged_probas = [p for p in clf.staged_predict_proba(iris.data)] + score = clf.score(iris.data, iris.target, sample_weight=iris_weights) + staged_scores = [ + s for s in clf.staged_score(iris.data, iris.target, sample_weight=iris_weights) + ] + + assert len(staged_predictions) == 10 + assert_array_almost_equal(predictions, staged_predictions[-1]) + assert len(staged_probas) == 10 + assert_array_almost_equal(proba, staged_probas[-1]) + assert len(staged_scores) == 10 + assert_array_almost_equal(score, staged_scores[-1]) + + # AdaBoost regression + clf = AdaBoostRegressor(n_estimators=10, random_state=0) + clf.fit(diabetes.data, diabetes.target, sample_weight=diabetes_weights) + + predictions = clf.predict(diabetes.data) + staged_predictions = [p for p in clf.staged_predict(diabetes.data)] + score = clf.score(diabetes.data, diabetes.target, sample_weight=diabetes_weights) + staged_scores = [ + s + for s in clf.staged_score( + diabetes.data, diabetes.target, sample_weight=diabetes_weights + ) + ] + + assert len(staged_predictions) == 10 + assert_array_almost_equal(predictions, staged_predictions[-1]) + assert len(staged_scores) == 10 + assert_array_almost_equal(score, staged_scores[-1]) + + +def test_gridsearch(): + # Check that base trees can be grid-searched. + # AdaBoost classification + boost = AdaBoostClassifier(estimator=DecisionTreeClassifier()) + parameters = { + "n_estimators": (1, 2), + "estimator__max_depth": (1, 2), + "algorithm": ("SAMME", "SAMME.R"), + } + clf = GridSearchCV(boost, parameters) + clf.fit(iris.data, iris.target) + + # AdaBoost regression + boost = AdaBoostRegressor(estimator=DecisionTreeRegressor(), random_state=0) + parameters = {"n_estimators": (1, 2), "estimator__max_depth": (1, 2)} + clf = GridSearchCV(boost, parameters) + clf.fit(diabetes.data, diabetes.target) + + +# TODO(1.6): remove "@pytest.mark.filterwarnings" as SAMME.R will be removed +# and substituted with the SAMME algorithm as a default; also re-write test to +# only consider "SAMME" +@pytest.mark.filterwarnings("ignore:The SAMME.R algorithm") +def test_pickle(): + # Check pickability. + import pickle + + # Adaboost classifier + for alg in ["SAMME", "SAMME.R"]: + obj = AdaBoostClassifier(algorithm=alg) + obj.fit(iris.data, iris.target) + score = obj.score(iris.data, iris.target) + s = pickle.dumps(obj) + + obj2 = pickle.loads(s) + assert type(obj2) == obj.__class__ + score2 = obj2.score(iris.data, iris.target) + assert score == score2 + + # Adaboost regressor + obj = AdaBoostRegressor(random_state=0) + obj.fit(diabetes.data, diabetes.target) + score = obj.score(diabetes.data, diabetes.target) + s = pickle.dumps(obj) + + obj2 = pickle.loads(s) + assert type(obj2) == obj.__class__ + score2 = obj2.score(diabetes.data, diabetes.target) + assert score == score2 + + +# TODO(1.6): remove "@pytest.mark.filterwarnings" as SAMME.R will be removed +# and substituted with the SAMME algorithm as a default; also re-write test to +# only consider "SAMME" +@pytest.mark.filterwarnings("ignore:The SAMME.R algorithm") +def test_importances(): + # Check variable importances. 
+ X, y = datasets.make_classification( + n_samples=2000, + n_features=10, + n_informative=3, + n_redundant=0, + n_repeated=0, + shuffle=False, + random_state=1, + ) + + for alg in ["SAMME", "SAMME.R"]: + clf = AdaBoostClassifier(algorithm=alg) + + clf.fit(X, y) + importances = clf.feature_importances_ + + assert importances.shape[0] == 10 + assert (importances[:3, np.newaxis] >= importances[3:]).all() + + +def test_adaboost_classifier_sample_weight_error(): + # Test that it gives proper exception on incorrect sample weight. + clf = AdaBoostClassifier() + msg = re.escape("sample_weight.shape == (1,), expected (6,)") + with pytest.raises(ValueError, match=msg): + clf.fit(X, y_class, sample_weight=np.asarray([-1])) + + +def test_estimator(): + # Test different estimators. + from sklearn.ensemble import RandomForestClassifier + + # XXX doesn't work with y_class because RF doesn't support classes_ + # Shouldn't AdaBoost run a LabelBinarizer? + clf = AdaBoostClassifier(RandomForestClassifier(), algorithm="SAMME") + clf.fit(X, y_regr) + + clf = AdaBoostClassifier(SVC(), algorithm="SAMME") + clf.fit(X, y_class) + + from sklearn.ensemble import RandomForestRegressor + + clf = AdaBoostRegressor(RandomForestRegressor(), random_state=0) + clf.fit(X, y_regr) + + clf = AdaBoostRegressor(SVR(), random_state=0) + clf.fit(X, y_regr) + + # Check that an empty discrete ensemble fails in fit, not predict. + X_fail = [[1, 1], [1, 1], [1, 1], [1, 1]] + y_fail = ["foo", "bar", 1, 2] + clf = AdaBoostClassifier(SVC(), algorithm="SAMME") + with pytest.raises(ValueError, match="worse than random"): + clf.fit(X_fail, y_fail) + + +def test_sample_weights_infinite(): + msg = "Sample weights have reached infinite values" + clf = AdaBoostClassifier(n_estimators=30, learning_rate=23.0, algorithm="SAMME") + with pytest.warns(UserWarning, match=msg): + clf.fit(iris.data, iris.target) + + +@pytest.mark.parametrize( + "sparse_container, expected_internal_type", + zip( + [ + *CSC_CONTAINERS, + *CSR_CONTAINERS, + *LIL_CONTAINERS, + *COO_CONTAINERS, + *DOK_CONTAINERS, + ], + CSC_CONTAINERS + 4 * CSR_CONTAINERS, + ), +) +def test_sparse_classification(sparse_container, expected_internal_type): + # Check classification with sparse input. 
+
+    class CustomSVC(SVC):
+        """SVC variant that records the nature of the training set."""
+
+        def fit(self, X, y, sample_weight=None):
+            """Modification on fit carries data type for later verification."""
+            super().fit(X, y, sample_weight=sample_weight)
+            self.data_type_ = type(X)
+            return self
+
+    X, y = datasets.make_multilabel_classification(
+        n_classes=1, n_samples=15, n_features=5, random_state=42
+    )
+    # Flatten y to a 1d array
+    y = np.ravel(y)
+
+    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
+
+    X_train_sparse = sparse_container(X_train)
+    X_test_sparse = sparse_container(X_test)
+
+    # Trained on sparse format
+    sparse_classifier = AdaBoostClassifier(
+        estimator=CustomSVC(probability=True),
+        random_state=1,
+        algorithm="SAMME",
+    ).fit(X_train_sparse, y_train)
+
+    # Trained on dense format
+    dense_classifier = AdaBoostClassifier(
+        estimator=CustomSVC(probability=True),
+        random_state=1,
+        algorithm="SAMME",
+    ).fit(X_train, y_train)
+
+    # predict
+    sparse_clf_results = sparse_classifier.predict(X_test_sparse)
+    dense_clf_results = dense_classifier.predict(X_test)
+    assert_array_equal(sparse_clf_results, dense_clf_results)
+
+    # decision_function
+    sparse_clf_results = sparse_classifier.decision_function(X_test_sparse)
+    dense_clf_results = dense_classifier.decision_function(X_test)
+    assert_array_almost_equal(sparse_clf_results, dense_clf_results)
+
+    # predict_log_proba
+    sparse_clf_results = sparse_classifier.predict_log_proba(X_test_sparse)
+    dense_clf_results = dense_classifier.predict_log_proba(X_test)
+    assert_array_almost_equal(sparse_clf_results, dense_clf_results)
+
+    # predict_proba
+    sparse_clf_results = sparse_classifier.predict_proba(X_test_sparse)
+    dense_clf_results = dense_classifier.predict_proba(X_test)
+    assert_array_almost_equal(sparse_clf_results, dense_clf_results)
+
+    # score
+    sparse_clf_results = sparse_classifier.score(X_test_sparse, y_test)
+    dense_clf_results = dense_classifier.score(X_test, y_test)
+    assert_array_almost_equal(sparse_clf_results, dense_clf_results)
+
+    # staged_decision_function
+    sparse_clf_results = sparse_classifier.staged_decision_function(X_test_sparse)
+    dense_clf_results = dense_classifier.staged_decision_function(X_test)
+    for sparse_clf_res, dense_clf_res in zip(sparse_clf_results, dense_clf_results):
+        assert_array_almost_equal(sparse_clf_res, dense_clf_res)
+
+    # staged_predict
+    sparse_clf_results = sparse_classifier.staged_predict(X_test_sparse)
+    dense_clf_results = dense_classifier.staged_predict(X_test)
+    for sparse_clf_res, dense_clf_res in zip(sparse_clf_results, dense_clf_results):
+        assert_array_equal(sparse_clf_res, dense_clf_res)
+
+    # staged_predict_proba
+    sparse_clf_results = sparse_classifier.staged_predict_proba(X_test_sparse)
+    dense_clf_results = dense_classifier.staged_predict_proba(X_test)
+    for sparse_clf_res, dense_clf_res in zip(sparse_clf_results, dense_clf_results):
+        assert_array_almost_equal(sparse_clf_res, dense_clf_res)
+
+    # staged_score
+    sparse_clf_results = sparse_classifier.staged_score(X_test_sparse, y_test)
+    dense_clf_results = dense_classifier.staged_score(X_test, y_test)
+    for sparse_clf_res, dense_clf_res in zip(sparse_clf_results, dense_clf_results):
+        assert_array_equal(sparse_clf_res, dense_clf_res)
+
+    # Verify sparsity of data is maintained during training
+    types = [i.data_type_ for i in sparse_classifier.estimators_]
+
+    assert all([t == expected_internal_type for t in types])
+
+
+@pytest.mark.parametrize(
+    "sparse_container, expected_internal_type",
expected_internal_type", + zip( + [ + *CSC_CONTAINERS, + *CSR_CONTAINERS, + *LIL_CONTAINERS, + *COO_CONTAINERS, + *DOK_CONTAINERS, + ], + CSC_CONTAINERS + 4 * CSR_CONTAINERS, + ), +) +def test_sparse_regression(sparse_container, expected_internal_type): + # Check regression with sparse input. + + class CustomSVR(SVR): + """SVR variant that records the nature of the training set.""" + + def fit(self, X, y, sample_weight=None): + """Modification on fit caries data type for later verification.""" + super().fit(X, y, sample_weight=sample_weight) + self.data_type_ = type(X) + return self + + X, y = datasets.make_regression( + n_samples=15, n_features=50, n_targets=1, random_state=42 + ) + + X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) + + X_train_sparse = sparse_container(X_train) + X_test_sparse = sparse_container(X_test) + + # Trained on sparse format + sparse_regressor = AdaBoostRegressor(estimator=CustomSVR(), random_state=1).fit( + X_train_sparse, y_train + ) + + # Trained on dense format + dense_regressor = AdaBoostRegressor(estimator=CustomSVR(), random_state=1).fit( + X_train, y_train + ) + + # predict + sparse_regr_results = sparse_regressor.predict(X_test_sparse) + dense_regr_results = dense_regressor.predict(X_test) + assert_array_almost_equal(sparse_regr_results, dense_regr_results) + + # staged_predict + sparse_regr_results = sparse_regressor.staged_predict(X_test_sparse) + dense_regr_results = dense_regressor.staged_predict(X_test) + for sparse_regr_res, dense_regr_res in zip(sparse_regr_results, dense_regr_results): + assert_array_almost_equal(sparse_regr_res, dense_regr_res) + + types = [i.data_type_ for i in sparse_regressor.estimators_] + + assert all([t == expected_internal_type for t in types]) + + +def test_sample_weight_adaboost_regressor(): + """ + AdaBoostRegressor should work without sample_weights in the base estimator + The random weighted sampling is done internally in the _boost method in + AdaBoostRegressor. 
+ """ + + class DummyEstimator(BaseEstimator): + def fit(self, X, y): + pass + + def predict(self, X): + return np.zeros(X.shape[0]) + + boost = AdaBoostRegressor(DummyEstimator(), n_estimators=3) + boost.fit(X, y_regr) + assert len(boost.estimator_weights_) == len(boost.estimator_errors_) + + +def test_multidimensional_X(): + """ + Check that the AdaBoost estimators can work with n-dimensional + data matrix + """ + rng = np.random.RandomState(0) + + X = rng.randn(51, 3, 3) + yc = rng.choice([0, 1], 51) + yr = rng.randn(51) + + boost = AdaBoostClassifier( + DummyClassifier(strategy="most_frequent"), algorithm="SAMME" + ) + boost.fit(X, yc) + boost.predict(X) + boost.predict_proba(X) + + boost = AdaBoostRegressor(DummyRegressor()) + boost.fit(X, yr) + boost.predict(X) + + +# TODO(1.6): remove "@pytest.mark.filterwarnings" as SAMME.R will be removed +# and substituted with the SAMME algorithm as a default; also re-write test to +# only consider "SAMME" +@pytest.mark.filterwarnings("ignore:The SAMME.R algorithm") +@pytest.mark.parametrize("algorithm", ["SAMME", "SAMME.R"]) +def test_adaboostclassifier_without_sample_weight(algorithm): + X, y = iris.data, iris.target + estimator = NoSampleWeightWrapper(DummyClassifier()) + clf = AdaBoostClassifier(estimator=estimator, algorithm=algorithm) + err_msg = "{} doesn't support sample_weight".format(estimator.__class__.__name__) + with pytest.raises(ValueError, match=err_msg): + clf.fit(X, y) + + +def test_adaboostregressor_sample_weight(): + # check that giving weight will have an influence on the error computed + # for a weak learner + rng = np.random.RandomState(42) + X = np.linspace(0, 100, num=1000) + y = (0.8 * X + 0.2) + (rng.rand(X.shape[0]) * 0.0001) + X = X.reshape(-1, 1) + + # add an arbitrary outlier + X[-1] *= 10 + y[-1] = 10000 + + # random_state=0 ensure that the underlying bootstrap will use the outlier + regr_no_outlier = AdaBoostRegressor( + estimator=LinearRegression(), n_estimators=1, random_state=0 + ) + regr_with_weight = clone(regr_no_outlier) + regr_with_outlier = clone(regr_no_outlier) + + # fit 3 models: + # - a model containing the outlier + # - a model without the outlier + # - a model containing the outlier but with a null sample-weight + regr_with_outlier.fit(X, y) + regr_no_outlier.fit(X[:-1], y[:-1]) + sample_weight = np.ones_like(y) + sample_weight[-1] = 0 + regr_with_weight.fit(X, y, sample_weight=sample_weight) + + score_with_outlier = regr_with_outlier.score(X[:-1], y[:-1]) + score_no_outlier = regr_no_outlier.score(X[:-1], y[:-1]) + score_with_weight = regr_with_weight.score(X[:-1], y[:-1]) + + assert score_with_outlier < score_no_outlier + assert score_with_outlier < score_with_weight + assert score_no_outlier == pytest.approx(score_with_weight) + + +# TODO(1.6): remove "@pytest.mark.filterwarnings" as SAMME.R will be removed +# and substituted with the SAMME algorithm as a default; also re-write test to +# only consider "SAMME" +@pytest.mark.filterwarnings("ignore:The SAMME.R algorithm") +@pytest.mark.parametrize("algorithm", ["SAMME", "SAMME.R"]) +def test_adaboost_consistent_predict(algorithm): + # check that predict_proba and predict give consistent results + # regression test for: + # https://github.com/scikit-learn/scikit-learn/issues/14084 + X_train, X_test, y_train, y_test = train_test_split( + *datasets.load_digits(return_X_y=True), random_state=42 + ) + model = AdaBoostClassifier(algorithm=algorithm, random_state=42) + model.fit(X_train, y_train) + + assert_array_equal( + 
+        np.argmax(model.predict_proba(X_test), axis=1), model.predict(X_test)
+    )
+
+
+@pytest.mark.parametrize(
+    "model, X, y",
+    [
+        (AdaBoostClassifier(), iris.data, iris.target),
+        (AdaBoostRegressor(), diabetes.data, diabetes.target),
+    ],
+)
+def test_adaboost_negative_weight_error(model, X, y):
+    sample_weight = np.ones_like(y)
+    sample_weight[-1] = -10
+
+    err_msg = "Negative values in data passed to `sample_weight`"
+    with pytest.raises(ValueError, match=err_msg):
+        model.fit(X, y, sample_weight=sample_weight)
+
+
+def test_adaboost_numerically_stable_feature_importance_with_small_weights():
+    """Check that we don't create NaN feature importance with numerically
+    unstable inputs.
+
+    Non-regression test for:
+    https://github.com/scikit-learn/scikit-learn/issues/20320
+    """
+    rng = np.random.RandomState(42)
+    X = rng.normal(size=(1000, 10))
+    y = rng.choice([0, 1], size=1000)
+    sample_weight = np.ones_like(y) * 1e-263
+    tree = DecisionTreeClassifier(max_depth=10, random_state=12)
+    ada_model = AdaBoostClassifier(
+        estimator=tree, n_estimators=20, algorithm="SAMME", random_state=12
+    )
+    ada_model.fit(X, y, sample_weight=sample_weight)
+    assert np.isnan(ada_model.feature_importances_).sum() == 0
+
+
+# TODO(1.6): remove "@pytest.mark.filterwarnings" as SAMME.R will be removed
+# and substituted with the SAMME algorithm as a default; also re-write test to
+# only consider "SAMME"
+@pytest.mark.filterwarnings("ignore:The SAMME.R algorithm")
+@pytest.mark.parametrize("algorithm", ["SAMME", "SAMME.R"])
+def test_adaboost_decision_function(algorithm, global_random_seed):
+    """Check that the decision function respects the symmetric constraint for weak
+    learners.
+
+    Non-regression test for:
+    https://github.com/scikit-learn/scikit-learn/issues/26520
+    """
+    n_classes = 3
+    X, y = datasets.make_classification(
+        n_classes=n_classes, n_clusters_per_class=1, random_state=global_random_seed
+    )
+    clf = AdaBoostClassifier(
+        n_estimators=1, random_state=global_random_seed, algorithm=algorithm
+    ).fit(X, y)
+
+    y_score = clf.decision_function(X)
+    assert_allclose(y_score.sum(axis=1), 0, atol=1e-8)
+
+    if algorithm == "SAMME":
+        # With a single learner, we expect to have a decision function in
+        # {1, - 1 / (n_classes - 1)}.
+        assert set(np.unique(y_score)) == {1, -1 / (n_classes - 1)}
+
+    # We can assert the same for staged_decision_function since we have a single learner
+    for y_score in clf.staged_decision_function(X):
+        assert_allclose(y_score.sum(axis=1), 0, atol=1e-8)
+
+        if algorithm == "SAMME":
+            # With a single learner, we expect to have a decision function in
+            # {1, - 1 / (n_classes - 1)}.
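+            # (Editorial note, not part of the original test: with n_classes=3 the
+            # expected values are 1 and -1/2, so each row sums to 1 - 0.5 - 0.5 = 0,
+            # consistent with the symmetry assertion above.)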
+            assert set(np.unique(y_score)) == {1, -1 / (n_classes - 1)}
+
+    clf.set_params(n_estimators=5).fit(X, y)
+
+    y_score = clf.decision_function(X)
+    assert_allclose(y_score.sum(axis=1), 0, atol=1e-8)
+
+    for y_score in clf.staged_decision_function(X):
+        assert_allclose(y_score.sum(axis=1), 0, atol=1e-8)
+
+
+# TODO(1.6): remove
+def test_deprecated_samme_r_algorithm():
+    adaboost_clf = AdaBoostClassifier(n_estimators=1)
+    with pytest.warns(
+        FutureWarning,
+        match=re.escape("The SAMME.R algorithm (the default) is deprecated"),
+    ):
+        adaboost_clf.fit(X, y_class)