diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/_loss/__init__.py b/env-llmeval/lib/python3.10/site-packages/sklearn/_loss/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ee15e693c16f6aa996928f2862da30119b241f6d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/_loss/__init__.py @@ -0,0 +1,30 @@ +""" +The :mod:`sklearn._loss` module includes loss function classes suitable for +fitting classification and regression tasks. +""" + +from .loss import ( + AbsoluteError, + HalfBinomialLoss, + HalfGammaLoss, + HalfMultinomialLoss, + HalfPoissonLoss, + HalfSquaredError, + HalfTweedieLoss, + HalfTweedieLossIdentity, + HuberLoss, + PinballLoss, +) + +__all__ = [ + "HalfSquaredError", + "AbsoluteError", + "PinballLoss", + "HuberLoss", + "HalfPoissonLoss", + "HalfGammaLoss", + "HalfTweedieLoss", + "HalfTweedieLossIdentity", + "HalfBinomialLoss", + "HalfMultinomialLoss", +] diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/_loss/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/_loss/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7db4ab2d43e4afbad31bb50dfe17ccff88b7b1a0 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/_loss/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/_loss/__pycache__/link.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/_loss/__pycache__/link.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0a5b4e17c36363131ce16a1a5d4470bb4a1a19be Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/_loss/__pycache__/link.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/_loss/__pycache__/loss.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/_loss/__pycache__/loss.cpython-310.pyc new 
file mode 100644 index 0000000000000000000000000000000000000000..7d3920a1b320e4653f1a15d8449515bc2285c3f7 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/_loss/__pycache__/loss.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/_loss/_loss.pxd b/env-llmeval/lib/python3.10/site-packages/sklearn/_loss/_loss.pxd new file mode 100644 index 0000000000000000000000000000000000000000..f38cbe0badc96040426b61aef5e34e348cf7cfd1 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/_loss/_loss.pxd @@ -0,0 +1,91 @@ +# Fused types for input like y_true, raw_prediction, sample_weights. +ctypedef fused floating_in: + double + float + + +# Fused types for output like gradient and hessian +# We use a different fused types for input (floating_in) and output (floating_out), such +# that input and output can have different dtypes in the same function call. A single +# fused type can only take on one single value (type) for all arguments in one function +# call. 
+ctypedef fused floating_out: + double + float + + +# Struct to return 2 doubles +ctypedef struct double_pair: + double val1 + double val2 + + +# C base class for loss functions +cdef class CyLossFunction: + cdef double cy_loss(self, double y_true, double raw_prediction) noexcept nogil + cdef double cy_gradient(self, double y_true, double raw_prediction) noexcept nogil + cdef double_pair cy_grad_hess(self, double y_true, double raw_prediction) noexcept nogil + + +cdef class CyHalfSquaredError(CyLossFunction): + cdef double cy_loss(self, double y_true, double raw_prediction) noexcept nogil + cdef double cy_gradient(self, double y_true, double raw_prediction) noexcept nogil + cdef double_pair cy_grad_hess(self, double y_true, double raw_prediction) noexcept nogil + + +cdef class CyAbsoluteError(CyLossFunction): + cdef double cy_loss(self, double y_true, double raw_prediction) noexcept nogil + cdef double cy_gradient(self, double y_true, double raw_prediction) noexcept nogil + cdef double_pair cy_grad_hess(self, double y_true, double raw_prediction) noexcept nogil + + +cdef class CyPinballLoss(CyLossFunction): + cdef readonly double quantile # readonly makes it accessible from Python + cdef double cy_loss(self, double y_true, double raw_prediction) noexcept nogil + cdef double cy_gradient(self, double y_true, double raw_prediction) noexcept nogil + cdef double_pair cy_grad_hess(self, double y_true, double raw_prediction) noexcept nogil + + +cdef class CyHuberLoss(CyLossFunction): + cdef public double delta # public makes it accessible from Python + cdef double cy_loss(self, double y_true, double raw_prediction) noexcept nogil + cdef double cy_gradient(self, double y_true, double raw_prediction) noexcept nogil + cdef double_pair cy_grad_hess(self, double y_true, double raw_prediction) noexcept nogil + + +cdef class CyHalfPoissonLoss(CyLossFunction): + cdef double cy_loss(self, double y_true, double raw_prediction) noexcept nogil + cdef double cy_gradient(self, double 
y_true, double raw_prediction) noexcept nogil + cdef double_pair cy_grad_hess(self, double y_true, double raw_prediction) noexcept nogil + + +cdef class CyHalfGammaLoss(CyLossFunction): + cdef double cy_loss(self, double y_true, double raw_prediction) noexcept nogil + cdef double cy_gradient(self, double y_true, double raw_prediction) noexcept nogil + cdef double_pair cy_grad_hess(self, double y_true, double raw_prediction) noexcept nogil + + +cdef class CyHalfTweedieLoss(CyLossFunction): + cdef readonly double power # readonly makes it accessible from Python + cdef double cy_loss(self, double y_true, double raw_prediction) noexcept nogil + cdef double cy_gradient(self, double y_true, double raw_prediction) noexcept nogil + cdef double_pair cy_grad_hess(self, double y_true, double raw_prediction) noexcept nogil + + +cdef class CyHalfTweedieLossIdentity(CyLossFunction): + cdef readonly double power # readonly makes it accessible from Python + cdef double cy_loss(self, double y_true, double raw_prediction) noexcept nogil + cdef double cy_gradient(self, double y_true, double raw_prediction) noexcept nogil + cdef double_pair cy_grad_hess(self, double y_true, double raw_prediction) noexcept nogil + + +cdef class CyHalfBinomialLoss(CyLossFunction): + cdef double cy_loss(self, double y_true, double raw_prediction) noexcept nogil + cdef double cy_gradient(self, double y_true, double raw_prediction) noexcept nogil + cdef double_pair cy_grad_hess(self, double y_true, double raw_prediction) noexcept nogil + + +cdef class CyExponentialLoss(CyLossFunction): + cdef double cy_loss(self, double y_true, double raw_prediction) noexcept nogil + cdef double cy_gradient(self, double y_true, double raw_prediction) noexcept nogil + cdef double_pair cy_grad_hess(self, double y_true, double raw_prediction) noexcept nogil diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/_loss/link.py b/env-llmeval/lib/python3.10/site-packages/sklearn/_loss/link.py new file mode 100644 index 
0000000000000000000000000000000000000000..9459844f6b89afe7cf6b2ac2edfd892373fb6d51 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/_loss/link.py @@ -0,0 +1,280 @@ +""" +Module contains classes for invertible (and differentiable) link functions. +""" +# Author: Christian Lorentzen + +from abc import ABC, abstractmethod +from dataclasses import dataclass + +import numpy as np +from scipy.special import expit, logit +from scipy.stats import gmean + +from ..utils.extmath import softmax + + +@dataclass +class Interval: + low: float + high: float + low_inclusive: bool + high_inclusive: bool + + def __post_init__(self): + """Check that low <= high""" + if self.low > self.high: + raise ValueError( + f"One must have low <= high; got low={self.low}, high={self.high}." + ) + + def includes(self, x): + """Test whether all values of x are in interval range. + + Parameters + ---------- + x : ndarray + Array whose elements are tested to be in interval range. + + Returns + ------- + result : bool + """ + if self.low_inclusive: + low = np.greater_equal(x, self.low) + else: + low = np.greater(x, self.low) + + if not np.all(low): + return False + + if self.high_inclusive: + high = np.less_equal(x, self.high) + else: + high = np.less(x, self.high) + + # Note: np.all returns numpy.bool_ + return bool(np.all(high)) + + +def _inclusive_low_high(interval, dtype=np.float64): + """Generate values low and high to be within the interval range. + + This is used in tests only. + + Returns + ------- + low, high : tuple + The returned values low and high lie within the interval. 
+ """ + eps = 10 * np.finfo(dtype).eps + if interval.low == -np.inf: + low = -1e10 + elif interval.low < 0: + low = interval.low * (1 - eps) + eps + else: + low = interval.low * (1 + eps) + eps + + if interval.high == np.inf: + high = 1e10 + elif interval.high < 0: + high = interval.high * (1 + eps) - eps + else: + high = interval.high * (1 - eps) - eps + + return low, high + + +class BaseLink(ABC): + """Abstract base class for differentiable, invertible link functions. + + Convention: + - link function g: raw_prediction = g(y_pred) + - inverse link h: y_pred = h(raw_prediction) + + For (generalized) linear models, `raw_prediction = X @ coef` is the so + called linear predictor, and `y_pred = h(raw_prediction)` is the predicted + conditional (on X) expected value of the target `y_true`. + + The methods are not implemented as staticmethods in case a link function needs + parameters. + """ + + is_multiclass = False # used for testing only + + # Usually, raw_prediction may be any real number and y_pred is an open + # interval. + # interval_raw_prediction = Interval(-np.inf, np.inf, False, False) + interval_y_pred = Interval(-np.inf, np.inf, False, False) + + @abstractmethod + def link(self, y_pred, out=None): + """Compute the link function g(y_pred). + + The link function maps (predicted) target values to raw predictions, + i.e. `g(y_pred) = raw_prediction`. + + Parameters + ---------- + y_pred : array + Predicted target values. + out : array + A location into which the result is stored. If provided, it must + have a shape that the inputs broadcast to. If not provided or None, + a freshly-allocated array is returned. + + Returns + ------- + out : array + Output array, element-wise link function. + """ + + @abstractmethod + def inverse(self, raw_prediction, out=None): + """Compute the inverse link function h(raw_prediction). + + The inverse link function maps raw predictions to predicted target + values, i.e. `h(raw_prediction) = y_pred`. 
+ + Parameters + ---------- + raw_prediction : array + Raw prediction values (in link space). + out : array + A location into which the result is stored. If provided, it must + have a shape that the inputs broadcast to. If not provided or None, + a freshly-allocated array is returned. + + Returns + ------- + out : array + Output array, element-wise inverse link function. + """ + + +class IdentityLink(BaseLink): + """The identity link function g(x)=x.""" + + def link(self, y_pred, out=None): + if out is not None: + np.copyto(out, y_pred) + return out + else: + return y_pred + + inverse = link + + +class LogLink(BaseLink): + """The log link function g(x)=log(x).""" + + interval_y_pred = Interval(0, np.inf, False, False) + + def link(self, y_pred, out=None): + return np.log(y_pred, out=out) + + def inverse(self, raw_prediction, out=None): + return np.exp(raw_prediction, out=out) + + +class LogitLink(BaseLink): + """The logit link function g(x)=logit(x).""" + + interval_y_pred = Interval(0, 1, False, False) + + def link(self, y_pred, out=None): + return logit(y_pred, out=out) + + def inverse(self, raw_prediction, out=None): + return expit(raw_prediction, out=out) + + +class HalfLogitLink(BaseLink): + """Half the logit link function g(x)=1/2 * logit(x). + + Used for the exponential loss. + """ + + interval_y_pred = Interval(0, 1, False, False) + + def link(self, y_pred, out=None): + out = logit(y_pred, out=out) + out *= 0.5 + return out + + def inverse(self, raw_prediction, out=None): + return expit(2 * raw_prediction, out) + + +class MultinomialLogit(BaseLink): + """The symmetric multinomial logit function. + + Convention: + - y_pred.shape = raw_prediction.shape = (n_samples, n_classes) + + Notes: + - The inverse link h is the softmax function. + - The sum is over the second axis, i.e. axis=1 (n_classes). 
+ + We have to choose additional constraints in order to make + + y_pred[k] = exp(raw_pred[k]) / sum(exp(raw_pred[k]), k=0..n_classes-1) + + for n_classes classes identifiable and invertible. + We choose the symmetric side constraint where the geometric mean response + is set as reference category, see [2]: + + The symmetric multinomial logit link function for a single data point is + then defined as + + raw_prediction[k] = g(y_pred[k]) = log(y_pred[k]/gmean(y_pred)) + = log(y_pred[k]) - mean(log(y_pred)). + + Note that this is equivalent to the definition in [1] and implies mean + centered raw predictions: + + sum(raw_prediction[k], k=0..n_classes-1) = 0. + + For linear models with raw_prediction = X @ coef, this corresponds to + sum(coef[k], k=0..n_classes-1) = 0, i.e. the sum over classes for every + feature is zero. + + Reference + --------- + .. [1] Friedman, Jerome; Hastie, Trevor; Tibshirani, Robert. "Additive + logistic regression: a statistical view of boosting" Ann. Statist. + 28 (2000), no. 2, 337--407. doi:10.1214/aos/1016218223. + https://projecteuclid.org/euclid.aos/1016218223 + + .. [2] Zahid, Faisal Maqbool and Gerhard Tutz. "Ridge estimation for + multinomial logit models with symmetric side constraints." + Computational Statistics 28 (2013): 1017-1034. 
+ http://epub.ub.uni-muenchen.de/11001/1/tr067.pdf + """ + + is_multiclass = True + interval_y_pred = Interval(0, 1, False, False) + + def symmetrize_raw_prediction(self, raw_prediction): + return raw_prediction - np.mean(raw_prediction, axis=1)[:, np.newaxis] + + def link(self, y_pred, out=None): + # geometric mean as reference category + gm = gmean(y_pred, axis=1) + return np.log(y_pred / gm[:, np.newaxis], out=out) + + def inverse(self, raw_prediction, out=None): + if out is None: + return softmax(raw_prediction, copy=True) + else: + np.copyto(out, raw_prediction) + softmax(out, copy=False) + return out + + +_LINKS = { + "identity": IdentityLink, + "log": LogLink, + "logit": LogitLink, + "half_logit": HalfLogitLink, + "multinomial_logit": MultinomialLogit, +} diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/_loss/loss.py b/env-llmeval/lib/python3.10/site-packages/sklearn/_loss/loss.py new file mode 100644 index 0000000000000000000000000000000000000000..a3b205ed10687ca9ad28ed310318ffa3c1275980 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/_loss/loss.py @@ -0,0 +1,1177 @@ +""" +This module contains loss classes suitable for fitting. + +It is not part of the public API. +Specific losses are used for regression, binary classification or multiclass +classification. +""" +# Goals: +# - Provide a common private module for loss functions/classes. +# - To be used in: +# - LogisticRegression +# - PoissonRegressor, GammaRegressor, TweedieRegressor +# - HistGradientBoostingRegressor, HistGradientBoostingClassifier +# - GradientBoostingRegressor, GradientBoostingClassifier +# - SGDRegressor, SGDClassifier +# - Replace link module of GLMs. 
+ +import numbers + +import numpy as np +from scipy.special import xlogy + +from ..utils import check_scalar +from ..utils.stats import _weighted_percentile +from ._loss import ( + CyAbsoluteError, + CyExponentialLoss, + CyHalfBinomialLoss, + CyHalfGammaLoss, + CyHalfMultinomialLoss, + CyHalfPoissonLoss, + CyHalfSquaredError, + CyHalfTweedieLoss, + CyHalfTweedieLossIdentity, + CyHuberLoss, + CyPinballLoss, +) +from .link import ( + HalfLogitLink, + IdentityLink, + Interval, + LogitLink, + LogLink, + MultinomialLogit, +) + + +# Note: The shape of raw_prediction for multiclass classifications are +# - GradientBoostingClassifier: (n_samples, n_classes) +# - HistGradientBoostingClassifier: (n_classes, n_samples) +# +# Note: Instead of inheritance like +# +# class BaseLoss(BaseLink, CyLossFunction): +# ... +# +# # Note: Naturally, we would inherit in the following order +# # class HalfSquaredError(IdentityLink, CyHalfSquaredError, BaseLoss) +# # But because of https://github.com/cython/cython/issues/4350 we set BaseLoss as +# # the last one. This, of course, changes the MRO. +# class HalfSquaredError(IdentityLink, CyHalfSquaredError, BaseLoss): +# +# we use composition. This way we improve maintainability by avoiding the above +# mentioned Cython edge case and have easier to understand code (which method calls +# which code). +class BaseLoss: + """Base class for a loss function of 1-dimensional targets. + + Conventions: + + - y_true.shape = sample_weight.shape = (n_samples,) + - y_pred.shape = raw_prediction.shape = (n_samples,) + - If is_multiclass is true (multiclass classification), then + y_pred.shape = raw_prediction.shape = (n_samples, n_classes) + Note that this corresponds to the return value of decision_function. + + y_true, y_pred, sample_weight and raw_prediction must either be all float64 + or all float32. + gradient and hessian must be either both float64 or both float32. + + Note that y_pred = link.inverse(raw_prediction). 
+ + Specific loss classes can inherit specific link classes to satisfy + BaseLink's abstractmethods. + + Parameters + ---------- + sample_weight : {None, ndarray} + If sample_weight is None, the hessian might be constant. + n_classes : {None, int} + The number of classes for classification, else None. + + Attributes + ---------- + closs: CyLossFunction + link : BaseLink + interval_y_true : Interval + Valid interval for y_true + interval_y_pred : Interval + Valid Interval for y_pred + differentiable : bool + Indicates whether or not loss function is differentiable in + raw_prediction everywhere. + need_update_leaves_values : bool + Indicates whether decision trees in gradient boosting need to uptade + leave values after having been fit to the (negative) gradients. + approx_hessian : bool + Indicates whether the hessian is approximated or exact. If, + approximated, it should be larger or equal to the exact one. + constant_hessian : bool + Indicates whether the hessian is one for this loss. + is_multiclass : bool + Indicates whether n_classes > 2 is allowed. + """ + + # For gradient boosted decision trees: + # This variable indicates whether the loss requires the leaves values to + # be updated once the tree has been trained. The trees are trained to + # predict a Newton-Raphson step (see grower._finalize_leaf()). But for + # some losses (e.g. least absolute deviation) we need to adjust the tree + # values to account for the "line search" of the gradient descent + # procedure. See the original paper Greedy Function Approximation: A + # Gradient Boosting Machine by Friedman + # (https://statweb.stanford.edu/~jhf/ftp/trebst.pdf) for the theory. 
+ differentiable = True + need_update_leaves_values = False + is_multiclass = False + + def __init__(self, closs, link, n_classes=None): + self.closs = closs + self.link = link + self.approx_hessian = False + self.constant_hessian = False + self.n_classes = n_classes + self.interval_y_true = Interval(-np.inf, np.inf, False, False) + self.interval_y_pred = self.link.interval_y_pred + + def in_y_true_range(self, y): + """Return True if y is in the valid range of y_true. + + Parameters + ---------- + y : ndarray + """ + return self.interval_y_true.includes(y) + + def in_y_pred_range(self, y): + """Return True if y is in the valid range of y_pred. + + Parameters + ---------- + y : ndarray + """ + return self.interval_y_pred.includes(y) + + def loss( + self, + y_true, + raw_prediction, + sample_weight=None, + loss_out=None, + n_threads=1, + ): + """Compute the pointwise loss value for each input. + + Parameters + ---------- + y_true : C-contiguous array of shape (n_samples,) + Observed, true target values. + raw_prediction : C-contiguous array of shape (n_samples,) or array of \ + shape (n_samples, n_classes) + Raw prediction values (in link space). + sample_weight : None or C-contiguous array of shape (n_samples,) + Sample weights. + loss_out : None or C-contiguous array of shape (n_samples,) + A location into which the result is stored. If None, a new array + might be created. + n_threads : int, default=1 + Might use openmp thread parallelism. + + Returns + ------- + loss : array of shape (n_samples,) + Element-wise loss function. 
+ """ + if loss_out is None: + loss_out = np.empty_like(y_true) + # Be graceful to shape (n_samples, 1) -> (n_samples,) + if raw_prediction.ndim == 2 and raw_prediction.shape[1] == 1: + raw_prediction = raw_prediction.squeeze(1) + + self.closs.loss( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + loss_out=loss_out, + n_threads=n_threads, + ) + return loss_out + + def loss_gradient( + self, + y_true, + raw_prediction, + sample_weight=None, + loss_out=None, + gradient_out=None, + n_threads=1, + ): + """Compute loss and gradient w.r.t. raw_prediction for each input. + + Parameters + ---------- + y_true : C-contiguous array of shape (n_samples,) + Observed, true target values. + raw_prediction : C-contiguous array of shape (n_samples,) or array of \ + shape (n_samples, n_classes) + Raw prediction values (in link space). + sample_weight : None or C-contiguous array of shape (n_samples,) + Sample weights. + loss_out : None or C-contiguous array of shape (n_samples,) + A location into which the loss is stored. If None, a new array + might be created. + gradient_out : None or C-contiguous array of shape (n_samples,) or array \ + of shape (n_samples, n_classes) + A location into which the gradient is stored. If None, a new array + might be created. + n_threads : int, default=1 + Might use openmp thread parallelism. + + Returns + ------- + loss : array of shape (n_samples,) + Element-wise loss function. + + gradient : array of shape (n_samples,) or (n_samples, n_classes) + Element-wise gradients. 
+ """ + if loss_out is None: + if gradient_out is None: + loss_out = np.empty_like(y_true) + gradient_out = np.empty_like(raw_prediction) + else: + loss_out = np.empty_like(y_true, dtype=gradient_out.dtype) + elif gradient_out is None: + gradient_out = np.empty_like(raw_prediction, dtype=loss_out.dtype) + + # Be graceful to shape (n_samples, 1) -> (n_samples,) + if raw_prediction.ndim == 2 and raw_prediction.shape[1] == 1: + raw_prediction = raw_prediction.squeeze(1) + if gradient_out.ndim == 2 and gradient_out.shape[1] == 1: + gradient_out = gradient_out.squeeze(1) + + self.closs.loss_gradient( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + loss_out=loss_out, + gradient_out=gradient_out, + n_threads=n_threads, + ) + return loss_out, gradient_out + + def gradient( + self, + y_true, + raw_prediction, + sample_weight=None, + gradient_out=None, + n_threads=1, + ): + """Compute gradient of loss w.r.t raw_prediction for each input. + + Parameters + ---------- + y_true : C-contiguous array of shape (n_samples,) + Observed, true target values. + raw_prediction : C-contiguous array of shape (n_samples,) or array of \ + shape (n_samples, n_classes) + Raw prediction values (in link space). + sample_weight : None or C-contiguous array of shape (n_samples,) + Sample weights. + gradient_out : None or C-contiguous array of shape (n_samples,) or array \ + of shape (n_samples, n_classes) + A location into which the result is stored. If None, a new array + might be created. + n_threads : int, default=1 + Might use openmp thread parallelism. + + Returns + ------- + gradient : array of shape (n_samples,) or (n_samples, n_classes) + Element-wise gradients. 
+ """ + if gradient_out is None: + gradient_out = np.empty_like(raw_prediction) + + # Be graceful to shape (n_samples, 1) -> (n_samples,) + if raw_prediction.ndim == 2 and raw_prediction.shape[1] == 1: + raw_prediction = raw_prediction.squeeze(1) + if gradient_out.ndim == 2 and gradient_out.shape[1] == 1: + gradient_out = gradient_out.squeeze(1) + + self.closs.gradient( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + gradient_out=gradient_out, + n_threads=n_threads, + ) + return gradient_out + + def gradient_hessian( + self, + y_true, + raw_prediction, + sample_weight=None, + gradient_out=None, + hessian_out=None, + n_threads=1, + ): + """Compute gradient and hessian of loss w.r.t raw_prediction. + + Parameters + ---------- + y_true : C-contiguous array of shape (n_samples,) + Observed, true target values. + raw_prediction : C-contiguous array of shape (n_samples,) or array of \ + shape (n_samples, n_classes) + Raw prediction values (in link space). + sample_weight : None or C-contiguous array of shape (n_samples,) + Sample weights. + gradient_out : None or C-contiguous array of shape (n_samples,) or array \ + of shape (n_samples, n_classes) + A location into which the gradient is stored. If None, a new array + might be created. + hessian_out : None or C-contiguous array of shape (n_samples,) or array \ + of shape (n_samples, n_classes) + A location into which the hessian is stored. If None, a new array + might be created. + n_threads : int, default=1 + Might use openmp thread parallelism. + + Returns + ------- + gradient : arrays of shape (n_samples,) or (n_samples, n_classes) + Element-wise gradients. + + hessian : arrays of shape (n_samples,) or (n_samples, n_classes) + Element-wise hessians. 
+ """ + if gradient_out is None: + if hessian_out is None: + gradient_out = np.empty_like(raw_prediction) + hessian_out = np.empty_like(raw_prediction) + else: + gradient_out = np.empty_like(hessian_out) + elif hessian_out is None: + hessian_out = np.empty_like(gradient_out) + + # Be graceful to shape (n_samples, 1) -> (n_samples,) + if raw_prediction.ndim == 2 and raw_prediction.shape[1] == 1: + raw_prediction = raw_prediction.squeeze(1) + if gradient_out.ndim == 2 and gradient_out.shape[1] == 1: + gradient_out = gradient_out.squeeze(1) + if hessian_out.ndim == 2 and hessian_out.shape[1] == 1: + hessian_out = hessian_out.squeeze(1) + + self.closs.gradient_hessian( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + gradient_out=gradient_out, + hessian_out=hessian_out, + n_threads=n_threads, + ) + return gradient_out, hessian_out + + def __call__(self, y_true, raw_prediction, sample_weight=None, n_threads=1): + """Compute the weighted average loss. + + Parameters + ---------- + y_true : C-contiguous array of shape (n_samples,) + Observed, true target values. + raw_prediction : C-contiguous array of shape (n_samples,) or array of \ + shape (n_samples, n_classes) + Raw prediction values (in link space). + sample_weight : None or C-contiguous array of shape (n_samples,) + Sample weights. + n_threads : int, default=1 + Might use openmp thread parallelism. + + Returns + ------- + loss : float + Mean or averaged loss function. + """ + return np.average( + self.loss( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=None, + loss_out=None, + n_threads=n_threads, + ), + weights=sample_weight, + ) + + def fit_intercept_only(self, y_true, sample_weight=None): + """Compute raw_prediction of an intercept-only model. + + This can be used as initial estimates of predictions, i.e. before the + first iteration in fit. + + Parameters + ---------- + y_true : array-like of shape (n_samples,) + Observed, true target values. 
+ sample_weight : None or array of shape (n_samples,) + Sample weights. + + Returns + ------- + raw_prediction : numpy scalar or array of shape (n_classes,) + Raw predictions of an intercept-only model. + """ + # As default, take weighted average of the target over the samples + # axis=0 and then transform into link-scale (raw_prediction). + y_pred = np.average(y_true, weights=sample_weight, axis=0) + eps = 10 * np.finfo(y_pred.dtype).eps + + if self.interval_y_pred.low == -np.inf: + a_min = None + elif self.interval_y_pred.low_inclusive: + a_min = self.interval_y_pred.low + else: + a_min = self.interval_y_pred.low + eps + + if self.interval_y_pred.high == np.inf: + a_max = None + elif self.interval_y_pred.high_inclusive: + a_max = self.interval_y_pred.high + else: + a_max = self.interval_y_pred.high - eps + + if a_min is None and a_max is None: + return self.link.link(y_pred) + else: + return self.link.link(np.clip(y_pred, a_min, a_max)) + + def constant_to_optimal_zero(self, y_true, sample_weight=None): + """Calculate term dropped in loss. + + With this term added, the loss of perfect predictions is zero. + """ + return np.zeros_like(y_true) + + def init_gradient_and_hessian(self, n_samples, dtype=np.float64, order="F"): + """Initialize arrays for gradients and hessians. + + Unless hessians are constant, arrays are initialized with undefined values. + + Parameters + ---------- + n_samples : int + The number of samples, usually passed to `fit()`. + dtype : {np.float64, np.float32}, default=np.float64 + The dtype of the arrays gradient and hessian. + order : {'C', 'F'}, default='F' + Order of the arrays gradient and hessian. The default 'F' makes the arrays + contiguous along samples. + + Returns + ------- + gradient : C-contiguous array of shape (n_samples,) or array of shape \ + (n_samples, n_classes) + Empty array (allocated but not initialized) to be used as argument + gradient_out. 
+ hessian : C-contiguous array of shape (n_samples,), array of shape + (n_samples, n_classes) or shape (1,) + Empty (allocated but not initialized) array to be used as argument + hessian_out. + If constant_hessian is True (e.g. `HalfSquaredError`), the array is + initialized to ``1``. + """ + if dtype not in (np.float32, np.float64): + raise ValueError( + "Valid options for 'dtype' are np.float32 and np.float64. " + f"Got dtype={dtype} instead." + ) + + if self.is_multiclass: + shape = (n_samples, self.n_classes) + else: + shape = (n_samples,) + gradient = np.empty(shape=shape, dtype=dtype, order=order) + + if self.constant_hessian: + # If the hessians are constant, we consider them equal to 1. + # - This is correct for HalfSquaredError + # - For AbsoluteError, hessians are actually 0, but they are + # always ignored anyway. + hessian = np.ones(shape=(1,), dtype=dtype) + else: + hessian = np.empty(shape=shape, dtype=dtype, order=order) + + return gradient, hessian + + +# Note: Naturally, we would inherit in the following order +# class HalfSquaredError(IdentityLink, CyHalfSquaredError, BaseLoss) +# But because of https://github.com/cython/cython/issues/4350 we +# set BaseLoss as the last one. This, of course, changes the MRO. +class HalfSquaredError(BaseLoss): + """Half squared error with identity link, for regression. + + Domain: + y_true and y_pred all real numbers + + Link: + y_pred = raw_prediction + + For a given sample x_i, half squared error is defined as:: + + loss(x_i) = 0.5 * (y_true_i - raw_prediction_i)**2 + + The factor of 0.5 simplifies the computation of gradients and results in a + unit hessian (and is consistent with what is done in LightGBM). It is also + half the Normal distribution deviance. + """ + + def __init__(self, sample_weight=None): + super().__init__(closs=CyHalfSquaredError(), link=IdentityLink()) + self.constant_hessian = sample_weight is None + + +class AbsoluteError(BaseLoss): + """Absolute error with identity link, for regression. 
+ + Domain: + y_true and y_pred all real numbers + + Link: + y_pred = raw_prediction + + For a given sample x_i, the absolute error is defined as:: + + loss(x_i) = |y_true_i - raw_prediction_i| + + Note that the exact hessian = 0 almost everywhere (except at one point, therefore + differentiable = False). Optimization routines like in HGBT, however, need a + hessian > 0. Therefore, we assign 1. + """ + + differentiable = False + need_update_leaves_values = True + + def __init__(self, sample_weight=None): + super().__init__(closs=CyAbsoluteError(), link=IdentityLink()) + self.approx_hessian = True + self.constant_hessian = sample_weight is None + + def fit_intercept_only(self, y_true, sample_weight=None): + """Compute raw_prediction of an intercept-only model. + + This is the weighted median of the target, i.e. over the samples + axis=0. + """ + if sample_weight is None: + return np.median(y_true, axis=0) + else: + return _weighted_percentile(y_true, sample_weight, 50) + + +class PinballLoss(BaseLoss): + """Quantile loss aka pinball loss, for regression. + + Domain: + y_true and y_pred all real numbers + quantile in (0, 1) + + Link: + y_pred = raw_prediction + + For a given sample x_i, the pinball loss is defined as:: + + loss(x_i) = rho_{quantile}(y_true_i - raw_prediction_i) + + rho_{quantile}(u) = u * (quantile - 1_{u<0}) + = -u *(1 - quantile) if u < 0 + u * quantile if u >= 0 + + Note: 2 * PinballLoss(quantile=0.5) equals AbsoluteError(). + + Note that the exact hessian = 0 almost everywhere (except at one point, therefore + differentiable = False). Optimization routines like in HGBT, however, need a + hessian > 0. Therefore, we assign 1. + + Additional Attributes + --------------------- + quantile : float + The quantile level of the quantile to be estimated. Must be in range (0, 1). 
+ """ + + differentiable = False + need_update_leaves_values = True + + def __init__(self, sample_weight=None, quantile=0.5): + check_scalar( + quantile, + "quantile", + target_type=numbers.Real, + min_val=0, + max_val=1, + include_boundaries="neither", + ) + super().__init__( + closs=CyPinballLoss(quantile=float(quantile)), + link=IdentityLink(), + ) + self.approx_hessian = True + self.constant_hessian = sample_weight is None + + def fit_intercept_only(self, y_true, sample_weight=None): + """Compute raw_prediction of an intercept-only model. + + This is the weighted median of the target, i.e. over the samples + axis=0. + """ + if sample_weight is None: + return np.percentile(y_true, 100 * self.closs.quantile, axis=0) + else: + return _weighted_percentile( + y_true, sample_weight, 100 * self.closs.quantile + ) + + +class HuberLoss(BaseLoss): + """Huber loss, for regression. + + Domain: + y_true and y_pred all real numbers + quantile in (0, 1) + + Link: + y_pred = raw_prediction + + For a given sample x_i, the Huber loss is defined as:: + + loss(x_i) = 1/2 * abserr**2 if abserr <= delta + delta * (abserr - delta/2) if abserr > delta + + abserr = |y_true_i - raw_prediction_i| + delta = quantile(abserr, self.quantile) + + Note: HuberLoss(quantile=1) equals HalfSquaredError and HuberLoss(quantile=0) + equals delta * (AbsoluteError() - delta/2). + + Additional Attributes + --------------------- + quantile : float + The quantile level which defines the breaking point `delta` to distinguish + between absolute error and squared error. Must be in range (0, 1). + + Reference + --------- + .. [1] Friedman, J.H. (2001). :doi:`Greedy function approximation: A gradient + boosting machine <10.1214/aos/1013203451>`. + Annals of Statistics, 29, 1189-1232. 
+ """ + + differentiable = False + need_update_leaves_values = True + + def __init__(self, sample_weight=None, quantile=0.9, delta=0.5): + check_scalar( + quantile, + "quantile", + target_type=numbers.Real, + min_val=0, + max_val=1, + include_boundaries="neither", + ) + self.quantile = quantile # This is better stored outside of Cython. + super().__init__( + closs=CyHuberLoss(delta=float(delta)), + link=IdentityLink(), + ) + self.approx_hessian = True + self.constant_hessian = False + + def fit_intercept_only(self, y_true, sample_weight=None): + """Compute raw_prediction of an intercept-only model. + + This is the weighted median of the target, i.e. over the samples + axis=0. + """ + # See formula before algo 4 in Friedman (2001), but we apply it to y_true, + # not to the residual y_true - raw_prediction. An estimator like + # HistGradientBoostingRegressor might then call it on the residual, e.g. + # fit_intercept_only(y_true - raw_prediction). + if sample_weight is None: + median = np.percentile(y_true, 50, axis=0) + else: + median = _weighted_percentile(y_true, sample_weight, 50) + diff = y_true - median + term = np.sign(diff) * np.minimum(self.closs.delta, np.abs(diff)) + return median + np.average(term, weights=sample_weight) + + +class HalfPoissonLoss(BaseLoss): + """Half Poisson deviance loss with log-link, for regression. + + Domain: + y_true in non-negative real numbers + y_pred in positive real numbers + + Link: + y_pred = exp(raw_prediction) + + For a given sample x_i, half the Poisson deviance is defined as:: + + loss(x_i) = y_true_i * log(y_true_i/exp(raw_prediction_i)) + - y_true_i + exp(raw_prediction_i) + + Half the Poisson deviance is actually the negative log-likelihood up to + constant terms (not involving raw_prediction) and simplifies the + computation of the gradients. + We also skip the constant term `y_true_i * log(y_true_i) - y_true_i`. 
+ """ + + def __init__(self, sample_weight=None): + super().__init__(closs=CyHalfPoissonLoss(), link=LogLink()) + self.interval_y_true = Interval(0, np.inf, True, False) + + def constant_to_optimal_zero(self, y_true, sample_weight=None): + term = xlogy(y_true, y_true) - y_true + if sample_weight is not None: + term *= sample_weight + return term + + +class HalfGammaLoss(BaseLoss): + """Half Gamma deviance loss with log-link, for regression. + + Domain: + y_true and y_pred in positive real numbers + + Link: + y_pred = exp(raw_prediction) + + For a given sample x_i, half Gamma deviance loss is defined as:: + + loss(x_i) = log(exp(raw_prediction_i)/y_true_i) + + y_true/exp(raw_prediction_i) - 1 + + Half the Gamma deviance is actually proportional to the negative log- + likelihood up to constant terms (not involving raw_prediction) and + simplifies the computation of the gradients. + We also skip the constant term `-log(y_true_i) - 1`. + """ + + def __init__(self, sample_weight=None): + super().__init__(closs=CyHalfGammaLoss(), link=LogLink()) + self.interval_y_true = Interval(0, np.inf, False, False) + + def constant_to_optimal_zero(self, y_true, sample_weight=None): + term = -np.log(y_true) - 1 + if sample_weight is not None: + term *= sample_weight + return term + + +class HalfTweedieLoss(BaseLoss): + """Half Tweedie deviance loss with log-link, for regression. + + Domain: + y_true in real numbers for power <= 0 + y_true in non-negative real numbers for 0 < power < 2 + y_true in positive real numbers for 2 <= power + y_pred in positive real numbers + power in real numbers + + Link: + y_pred = exp(raw_prediction) + + For a given sample x_i, half Tweedie deviance loss with p=power is defined + as:: + + loss(x_i) = max(y_true_i, 0)**(2-p) / (1-p) / (2-p) + - y_true_i * exp(raw_prediction_i)**(1-p) / (1-p) + + exp(raw_prediction_i)**(2-p) / (2-p) + + Taking the limits for p=0, 1, 2 gives HalfSquaredError with a log link, + HalfPoissonLoss and HalfGammaLoss. 
+ + We also skip constant terms, but those are different for p=0, 1, 2. + Therefore, the loss is not continuous in `power`. + + Note furthermore that although no Tweedie distribution exists for + 0 < power < 1, it still gives a strictly consistent scoring function for + the expectation. + """ + + def __init__(self, sample_weight=None, power=1.5): + super().__init__( + closs=CyHalfTweedieLoss(power=float(power)), + link=LogLink(), + ) + if self.closs.power <= 0: + self.interval_y_true = Interval(-np.inf, np.inf, False, False) + elif self.closs.power < 2: + self.interval_y_true = Interval(0, np.inf, True, False) + else: + self.interval_y_true = Interval(0, np.inf, False, False) + + def constant_to_optimal_zero(self, y_true, sample_weight=None): + if self.closs.power == 0: + return HalfSquaredError().constant_to_optimal_zero( + y_true=y_true, sample_weight=sample_weight + ) + elif self.closs.power == 1: + return HalfPoissonLoss().constant_to_optimal_zero( + y_true=y_true, sample_weight=sample_weight + ) + elif self.closs.power == 2: + return HalfGammaLoss().constant_to_optimal_zero( + y_true=y_true, sample_weight=sample_weight + ) + else: + p = self.closs.power + term = np.power(np.maximum(y_true, 0), 2 - p) / (1 - p) / (2 - p) + if sample_weight is not None: + term *= sample_weight + return term + + +class HalfTweedieLossIdentity(BaseLoss): + """Half Tweedie deviance loss with identity link, for regression. 
+ + Domain: + y_true in real numbers for power <= 0 + y_true in non-negative real numbers for 0 < power < 2 + y_true in positive real numbers for 2 <= power + y_pred in positive real numbers for power != 0 + y_pred in real numbers for power = 0 + power in real numbers + + Link: + y_pred = raw_prediction + + For a given sample x_i, half Tweedie deviance loss with p=power is defined + as:: + + loss(x_i) = max(y_true_i, 0)**(2-p) / (1-p) / (2-p) + - y_true_i * raw_prediction_i**(1-p) / (1-p) + + raw_prediction_i**(2-p) / (2-p) + + Note that the minimum value of this loss is 0. + + Note furthermore that although no Tweedie distribution exists for + 0 < power < 1, it still gives a strictly consistent scoring function for + the expectation. + """ + + def __init__(self, sample_weight=None, power=1.5): + super().__init__( + closs=CyHalfTweedieLossIdentity(power=float(power)), + link=IdentityLink(), + ) + if self.closs.power <= 0: + self.interval_y_true = Interval(-np.inf, np.inf, False, False) + elif self.closs.power < 2: + self.interval_y_true = Interval(0, np.inf, True, False) + else: + self.interval_y_true = Interval(0, np.inf, False, False) + + if self.closs.power == 0: + self.interval_y_pred = Interval(-np.inf, np.inf, False, False) + else: + self.interval_y_pred = Interval(0, np.inf, False, False) + + +class HalfBinomialLoss(BaseLoss): + """Half Binomial deviance loss with logit link, for binary classification. + + This is also know as binary cross entropy, log-loss and logistic loss. + + Domain: + y_true in [0, 1], i.e. regression on the unit interval + y_pred in (0, 1), i.e. 
boundaries excluded + + Link: + y_pred = expit(raw_prediction) + + For a given sample x_i, half Binomial deviance is defined as the negative + log-likelihood of the Binomial/Bernoulli distribution and can be expressed + as:: + + loss(x_i) = log(1 + exp(raw_pred_i)) - y_true_i * raw_pred_i + + See The Elements of Statistical Learning, by Hastie, Tibshirani, Friedman, + section 4.4.1 (about logistic regression). + + Note that the formulation works for classification, y = {0, 1}, as well as + logistic regression, y = [0, 1]. + If you add `constant_to_optimal_zero` to the loss, you get half the + Bernoulli/binomial deviance. + + More details: Inserting the predicted probability y_pred = expit(raw_prediction) + in the loss gives the well known:: + + loss(x_i) = - y_true_i * log(y_pred_i) - (1 - y_true_i) * log(1 - y_pred_i) + """ + + def __init__(self, sample_weight=None): + super().__init__( + closs=CyHalfBinomialLoss(), + link=LogitLink(), + n_classes=2, + ) + self.interval_y_true = Interval(0, 1, True, True) + + def constant_to_optimal_zero(self, y_true, sample_weight=None): + # This is non-zero only if y_true is neither 0 nor 1. + term = xlogy(y_true, y_true) + xlogy(1 - y_true, 1 - y_true) + if sample_weight is not None: + term *= sample_weight + return term + + def predict_proba(self, raw_prediction): + """Predict probabilities. + + Parameters + ---------- + raw_prediction : array of shape (n_samples,) or (n_samples, 1) + Raw prediction values (in link space). + + Returns + ------- + proba : array of shape (n_samples, 2) + Element-wise class probabilities. 
+ """ + # Be graceful to shape (n_samples, 1) -> (n_samples,) + if raw_prediction.ndim == 2 and raw_prediction.shape[1] == 1: + raw_prediction = raw_prediction.squeeze(1) + proba = np.empty((raw_prediction.shape[0], 2), dtype=raw_prediction.dtype) + proba[:, 1] = self.link.inverse(raw_prediction) + proba[:, 0] = 1 - proba[:, 1] + return proba + + +class HalfMultinomialLoss(BaseLoss): + """Categorical cross-entropy loss, for multiclass classification. + + Domain: + y_true in {0, 1, 2, 3, .., n_classes - 1} + y_pred has n_classes elements, each element in (0, 1) + + Link: + y_pred = softmax(raw_prediction) + + Note: We assume y_true to be already label encoded. The inverse link is + softmax. But the full link function is the symmetric multinomial logit + function. + + For a given sample x_i, the categorical cross-entropy loss is defined as + the negative log-likelihood of the multinomial distribution, it + generalizes the binary cross-entropy to more than 2 classes:: + + loss_i = log(sum(exp(raw_pred_{i, k}), k=0..n_classes-1)) + - sum(y_true_{i, k} * raw_pred_{i, k}, k=0..n_classes-1) + + See [1]. + + Note that for the hessian, we calculate only the diagonal part in the + classes: If the full hessian for classes k and l and sample i is H_i_k_l, + we calculate H_i_k_k, i.e. k=l. + + Reference + --------- + .. [1] :arxiv:`Simon, Noah, J. Friedman and T. Hastie. + "A Blockwise Descent Algorithm for Group-penalized Multiresponse and + Multinomial Regression". + <1311.6529>` + """ + + is_multiclass = True + + def __init__(self, sample_weight=None, n_classes=3): + super().__init__( + closs=CyHalfMultinomialLoss(), + link=MultinomialLogit(), + n_classes=n_classes, + ) + self.interval_y_true = Interval(0, np.inf, True, False) + self.interval_y_pred = Interval(0, 1, False, False) + + def in_y_true_range(self, y): + """Return True if y is in the valid range of y_true. 
+ + Parameters + ---------- + y : ndarray + """ + return self.interval_y_true.includes(y) and np.all(y.astype(int) == y) + + def fit_intercept_only(self, y_true, sample_weight=None): + """Compute raw_prediction of an intercept-only model. + + This is the softmax of the weighted average of the target, i.e. over + the samples axis=0. + """ + out = np.zeros(self.n_classes, dtype=y_true.dtype) + eps = np.finfo(y_true.dtype).eps + for k in range(self.n_classes): + out[k] = np.average(y_true == k, weights=sample_weight, axis=0) + out[k] = np.clip(out[k], eps, 1 - eps) + return self.link.link(out[None, :]).reshape(-1) + + def predict_proba(self, raw_prediction): + """Predict probabilities. + + Parameters + ---------- + raw_prediction : array of shape (n_samples, n_classes) + Raw prediction values (in link space). + + Returns + ------- + proba : array of shape (n_samples, n_classes) + Element-wise class probabilities. + """ + return self.link.inverse(raw_prediction) + + def gradient_proba( + self, + y_true, + raw_prediction, + sample_weight=None, + gradient_out=None, + proba_out=None, + n_threads=1, + ): + """Compute gradient and class probabilities fow raw_prediction. + + Parameters + ---------- + y_true : C-contiguous array of shape (n_samples,) + Observed, true target values. + raw_prediction : array of shape (n_samples, n_classes) + Raw prediction values (in link space). + sample_weight : None or C-contiguous array of shape (n_samples,) + Sample weights. + gradient_out : None or array of shape (n_samples, n_classes) + A location into which the gradient is stored. If None, a new array + might be created. + proba_out : None or array of shape (n_samples, n_classes) + A location into which the class probabilities are stored. If None, + a new array might be created. + n_threads : int, default=1 + Might use openmp thread parallelism. + + Returns + ------- + gradient : array of shape (n_samples, n_classes) + Element-wise gradients. 
+ + proba : array of shape (n_samples, n_classes) + Element-wise class probabilities. + """ + if gradient_out is None: + if proba_out is None: + gradient_out = np.empty_like(raw_prediction) + proba_out = np.empty_like(raw_prediction) + else: + gradient_out = np.empty_like(proba_out) + elif proba_out is None: + proba_out = np.empty_like(gradient_out) + + self.closs.gradient_proba( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + gradient_out=gradient_out, + proba_out=proba_out, + n_threads=n_threads, + ) + return gradient_out, proba_out + + +class ExponentialLoss(BaseLoss): + """Exponential loss with (half) logit link, for binary classification. + + This is also know as boosting loss. + + Domain: + y_true in [0, 1], i.e. regression on the unit interval + y_pred in (0, 1), i.e. boundaries excluded + + Link: + y_pred = expit(2 * raw_prediction) + + For a given sample x_i, the exponential loss is defined as:: + + loss(x_i) = y_true_i * exp(-raw_pred_i)) + (1 - y_true_i) * exp(raw_pred_i) + + See: + - J. Friedman, T. Hastie, R. Tibshirani. + "Additive logistic regression: a statistical view of boosting (With discussion + and a rejoinder by the authors)." Ann. Statist. 28 (2) 337 - 407, April 2000. + https://doi.org/10.1214/aos/1016218223 + - A. Buja, W. Stuetzle, Y. Shen. (2005). + "Loss Functions for Binary Class Probability Estimation and Classification: + Structure and Applications." + + Note that the formulation works for classification, y = {0, 1}, as well as + "exponential logistic" regression, y = [0, 1]. + Note that this is a proper scoring rule, but without it's canonical link. 
+ + More details: Inserting the predicted probability + y_pred = expit(2 * raw_prediction) in the loss gives:: + + loss(x_i) = y_true_i * sqrt((1 - y_pred_i) / y_pred_i) + + (1 - y_true_i) * sqrt(y_pred_i / (1 - y_pred_i)) + """ + + def __init__(self, sample_weight=None): + super().__init__( + closs=CyExponentialLoss(), + link=HalfLogitLink(), + n_classes=2, + ) + self.interval_y_true = Interval(0, 1, True, True) + + def constant_to_optimal_zero(self, y_true, sample_weight=None): + # This is non-zero only if y_true is neither 0 nor 1. + term = -2 * np.sqrt(y_true * (1 - y_true)) + if sample_weight is not None: + term *= sample_weight + return term + + def predict_proba(self, raw_prediction): + """Predict probabilities. + + Parameters + ---------- + raw_prediction : array of shape (n_samples,) or (n_samples, 1) + Raw prediction values (in link space). + + Returns + ------- + proba : array of shape (n_samples, 2) + Element-wise class probabilities. + """ + # Be graceful to shape (n_samples, 1) -> (n_samples,) + if raw_prediction.ndim == 2 and raw_prediction.shape[1] == 1: + raw_prediction = raw_prediction.squeeze(1) + proba = np.empty((raw_prediction.shape[0], 2), dtype=raw_prediction.dtype) + proba[:, 1] = self.link.inverse(raw_prediction) + proba[:, 0] = 1 - proba[:, 1] + return proba + + +_LOSSES = { + "squared_error": HalfSquaredError, + "absolute_error": AbsoluteError, + "pinball_loss": PinballLoss, + "huber_loss": HuberLoss, + "poisson_loss": HalfPoissonLoss, + "gamma_loss": HalfGammaLoss, + "tweedie_loss": HalfTweedieLoss, + "binomial_loss": HalfBinomialLoss, + "multinomial_loss": HalfMultinomialLoss, + "exponential_loss": ExponentialLoss, +} diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/_loss/tests/__init__.py b/env-llmeval/lib/python3.10/site-packages/sklearn/_loss/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git 
a/env-llmeval/lib/python3.10/site-packages/sklearn/_loss/tests/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/_loss/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6b3ea8fb9ed1539692152e35599899694d233634 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/_loss/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/_loss/tests/__pycache__/test_link.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/_loss/tests/__pycache__/test_link.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5a0566188fbc798f7a16069f0cf22266d4467528 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/_loss/tests/__pycache__/test_link.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/_loss/tests/__pycache__/test_loss.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/_loss/tests/__pycache__/test_loss.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d21729ff478fd79946f711ecbc7312049e25f469 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/_loss/tests/__pycache__/test_loss.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/_loss/tests/test_link.py b/env-llmeval/lib/python3.10/site-packages/sklearn/_loss/tests/test_link.py new file mode 100644 index 0000000000000000000000000000000000000000..e5a665f8d48ac9e356971346774a125b18d234d9 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/_loss/tests/test_link.py @@ -0,0 +1,111 @@ +import numpy as np +import pytest +from numpy.testing import assert_allclose, assert_array_equal + +from sklearn._loss.link import ( + _LINKS, + HalfLogitLink, + Interval, + MultinomialLogit, + _inclusive_low_high, +) + +LINK_FUNCTIONS = 
list(_LINKS.values()) + + +def test_interval_raises(): + """Test that interval with low > high raises ValueError.""" + with pytest.raises( + ValueError, match="One must have low <= high; got low=1, high=0." + ): + Interval(1, 0, False, False) + + +@pytest.mark.parametrize( + "interval", + [ + Interval(0, 1, False, False), + Interval(0, 1, False, True), + Interval(0, 1, True, False), + Interval(0, 1, True, True), + Interval(-np.inf, np.inf, False, False), + Interval(-np.inf, np.inf, False, True), + Interval(-np.inf, np.inf, True, False), + Interval(-np.inf, np.inf, True, True), + Interval(-10, -1, False, False), + Interval(-10, -1, False, True), + Interval(-10, -1, True, False), + Interval(-10, -1, True, True), + ], +) +def test_is_in_range(interval): + # make sure low and high are always within the interval, used for linspace + low, high = _inclusive_low_high(interval) + + x = np.linspace(low, high, num=10) + assert interval.includes(x) + + # x contains lower bound + assert interval.includes(np.r_[x, interval.low]) == interval.low_inclusive + + # x contains upper bound + assert interval.includes(np.r_[x, interval.high]) == interval.high_inclusive + + # x contains upper and lower bound + assert interval.includes(np.r_[x, interval.low, interval.high]) == ( + interval.low_inclusive and interval.high_inclusive + ) + + +@pytest.mark.parametrize("link", LINK_FUNCTIONS) +def test_link_inverse_identity(link, global_random_seed): + # Test that link of inverse gives identity. + rng = np.random.RandomState(global_random_seed) + link = link() + n_samples, n_classes = 100, None + # The values for `raw_prediction` are limited from -20 to 20 because in the + # class `LogitLink` the term `expit(x)` comes very close to 1 for large + # positive x and therefore loses precision. 
+ if link.is_multiclass: + n_classes = 10 + raw_prediction = rng.uniform(low=-20, high=20, size=(n_samples, n_classes)) + if isinstance(link, MultinomialLogit): + raw_prediction = link.symmetrize_raw_prediction(raw_prediction) + elif isinstance(link, HalfLogitLink): + raw_prediction = rng.uniform(low=-10, high=10, size=(n_samples)) + else: + raw_prediction = rng.uniform(low=-20, high=20, size=(n_samples)) + + assert_allclose(link.link(link.inverse(raw_prediction)), raw_prediction) + y_pred = link.inverse(raw_prediction) + assert_allclose(link.inverse(link.link(y_pred)), y_pred) + + +@pytest.mark.parametrize("link", LINK_FUNCTIONS) +def test_link_out_argument(link): + # Test that out argument gets assigned the result. + rng = np.random.RandomState(42) + link = link() + n_samples, n_classes = 100, None + if link.is_multiclass: + n_classes = 10 + raw_prediction = rng.normal(loc=0, scale=10, size=(n_samples, n_classes)) + if isinstance(link, MultinomialLogit): + raw_prediction = link.symmetrize_raw_prediction(raw_prediction) + else: + # So far, the valid interval of raw_prediction is (-inf, inf) and + # we do not need to distinguish. 
+ raw_prediction = rng.uniform(low=-10, high=10, size=(n_samples)) + + y_pred = link.inverse(raw_prediction, out=None) + out = np.empty_like(raw_prediction) + y_pred_2 = link.inverse(raw_prediction, out=out) + assert_allclose(y_pred, out) + assert_array_equal(out, y_pred_2) + assert np.shares_memory(out, y_pred_2) + + out = np.empty_like(y_pred) + raw_prediction_2 = link.link(y_pred, out=out) + assert_allclose(raw_prediction, out) + assert_array_equal(out, raw_prediction_2) + assert np.shares_memory(out, raw_prediction_2) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/_loss/tests/test_loss.py b/env-llmeval/lib/python3.10/site-packages/sklearn/_loss/tests/test_loss.py new file mode 100644 index 0000000000000000000000000000000000000000..9c8bba4d717d174fef91cee2210a52e232f90d5b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/_loss/tests/test_loss.py @@ -0,0 +1,1320 @@ +import pickle + +import numpy as np +import pytest +from numpy.testing import assert_allclose, assert_array_equal +from pytest import approx +from scipy.optimize import ( + LinearConstraint, + minimize, + minimize_scalar, + newton, +) +from scipy.special import logsumexp + +from sklearn._loss.link import IdentityLink, _inclusive_low_high +from sklearn._loss.loss import ( + _LOSSES, + AbsoluteError, + BaseLoss, + HalfBinomialLoss, + HalfGammaLoss, + HalfMultinomialLoss, + HalfPoissonLoss, + HalfSquaredError, + HalfTweedieLoss, + HalfTweedieLossIdentity, + HuberLoss, + PinballLoss, +) +from sklearn.utils import _IS_WASM, assert_all_finite +from sklearn.utils._testing import create_memmap_backed_data, skip_if_32bit + +ALL_LOSSES = list(_LOSSES.values()) + +LOSS_INSTANCES = [loss() for loss in ALL_LOSSES] +# HalfTweedieLoss(power=1.5) is already there as default +LOSS_INSTANCES += [ + PinballLoss(quantile=0.25), + HuberLoss(quantile=0.75), + HalfTweedieLoss(power=-1.5), + HalfTweedieLoss(power=0), + HalfTweedieLoss(power=1), + HalfTweedieLoss(power=2), + 
HalfTweedieLoss(power=3.0), + HalfTweedieLossIdentity(power=0), + HalfTweedieLossIdentity(power=1), + HalfTweedieLossIdentity(power=2), + HalfTweedieLossIdentity(power=3.0), +] + + +def loss_instance_name(param): + if isinstance(param, BaseLoss): + loss = param + name = loss.__class__.__name__ + if isinstance(loss, PinballLoss): + name += f"(quantile={loss.closs.quantile})" + elif isinstance(loss, HuberLoss): + name += f"(quantile={loss.quantile}" + elif hasattr(loss, "closs") and hasattr(loss.closs, "power"): + name += f"(power={loss.closs.power})" + return name + else: + return str(param) + + +def random_y_true_raw_prediction( + loss, n_samples, y_bound=(-100, 100), raw_bound=(-5, 5), seed=42 +): + """Random generate y_true and raw_prediction in valid range.""" + rng = np.random.RandomState(seed) + if loss.is_multiclass: + raw_prediction = np.empty((n_samples, loss.n_classes)) + raw_prediction.flat[:] = rng.uniform( + low=raw_bound[0], + high=raw_bound[1], + size=n_samples * loss.n_classes, + ) + y_true = np.arange(n_samples).astype(float) % loss.n_classes + else: + # If link is identity, we must respect the interval of y_pred: + if isinstance(loss.link, IdentityLink): + low, high = _inclusive_low_high(loss.interval_y_pred) + low = np.amax([low, raw_bound[0]]) + high = np.amin([high, raw_bound[1]]) + raw_bound = (low, high) + raw_prediction = rng.uniform( + low=raw_bound[0], high=raw_bound[1], size=n_samples + ) + # generate a y_true in valid range + low, high = _inclusive_low_high(loss.interval_y_true) + low = max(low, y_bound[0]) + high = min(high, y_bound[1]) + y_true = rng.uniform(low, high, size=n_samples) + # set some values at special boundaries + if loss.interval_y_true.low == 0 and loss.interval_y_true.low_inclusive: + y_true[:: (n_samples // 3)] = 0 + if loss.interval_y_true.high == 1 and loss.interval_y_true.high_inclusive: + y_true[1 :: (n_samples // 3)] = 1 + + return y_true, raw_prediction + + +def numerical_derivative(func, x, eps): + """Helper 
function for numerical (first) derivatives.""" + # For numerical derivatives, see + # https://en.wikipedia.org/wiki/Numerical_differentiation + # https://en.wikipedia.org/wiki/Finite_difference_coefficient + # We use central finite differences of accuracy 4. + h = np.full_like(x, fill_value=eps) + f_minus_2h = func(x - 2 * h) + f_minus_1h = func(x - h) + f_plus_1h = func(x + h) + f_plus_2h = func(x + 2 * h) + return (-f_plus_2h + 8 * f_plus_1h - 8 * f_minus_1h + f_minus_2h) / (12.0 * eps) + + +@pytest.mark.parametrize("loss", LOSS_INSTANCES, ids=loss_instance_name) +def test_loss_boundary(loss): + """Test interval ranges of y_true and y_pred in losses.""" + # make sure low and high are always within the interval, used for linspace + if loss.is_multiclass: + y_true = np.linspace(0, 9, num=10) + else: + low, high = _inclusive_low_high(loss.interval_y_true) + y_true = np.linspace(low, high, num=10) + + # add boundaries if they are included + if loss.interval_y_true.low_inclusive: + y_true = np.r_[y_true, loss.interval_y_true.low] + if loss.interval_y_true.high_inclusive: + y_true = np.r_[y_true, loss.interval_y_true.high] + + assert loss.in_y_true_range(y_true) + + n = y_true.shape[0] + low, high = _inclusive_low_high(loss.interval_y_pred) + if loss.is_multiclass: + y_pred = np.empty((n, 3)) + y_pred[:, 0] = np.linspace(low, high, num=n) + y_pred[:, 1] = 0.5 * (1 - y_pred[:, 0]) + y_pred[:, 2] = 0.5 * (1 - y_pred[:, 0]) + else: + y_pred = np.linspace(low, high, num=n) + + assert loss.in_y_pred_range(y_pred) + + # calculating losses should not fail + raw_prediction = loss.link.link(y_pred) + loss.loss(y_true=y_true, raw_prediction=raw_prediction) + + +# Fixture to test valid value ranges. 
+Y_COMMON_PARAMS = [ + # (loss, [y success], [y fail]) + (HalfSquaredError(), [-100, 0, 0.1, 100], [-np.inf, np.inf]), + (AbsoluteError(), [-100, 0, 0.1, 100], [-np.inf, np.inf]), + (PinballLoss(), [-100, 0, 0.1, 100], [-np.inf, np.inf]), + (HuberLoss(), [-100, 0, 0.1, 100], [-np.inf, np.inf]), + (HalfPoissonLoss(), [0.1, 100], [-np.inf, -3, -0.1, np.inf]), + (HalfGammaLoss(), [0.1, 100], [-np.inf, -3, -0.1, 0, np.inf]), + (HalfTweedieLoss(power=-3), [0.1, 100], [-np.inf, np.inf]), + (HalfTweedieLoss(power=0), [0.1, 100], [-np.inf, np.inf]), + (HalfTweedieLoss(power=1.5), [0.1, 100], [-np.inf, -3, -0.1, np.inf]), + (HalfTweedieLoss(power=2), [0.1, 100], [-np.inf, -3, -0.1, 0, np.inf]), + (HalfTweedieLoss(power=3), [0.1, 100], [-np.inf, -3, -0.1, 0, np.inf]), + (HalfTweedieLossIdentity(power=-3), [0.1, 100], [-np.inf, np.inf]), + (HalfTweedieLossIdentity(power=0), [-3, -0.1, 0, 0.1, 100], [-np.inf, np.inf]), + (HalfTweedieLossIdentity(power=1.5), [0.1, 100], [-np.inf, -3, -0.1, np.inf]), + (HalfTweedieLossIdentity(power=2), [0.1, 100], [-np.inf, -3, -0.1, 0, np.inf]), + (HalfTweedieLossIdentity(power=3), [0.1, 100], [-np.inf, -3, -0.1, 0, np.inf]), + (HalfBinomialLoss(), [0.1, 0.5, 0.9], [-np.inf, -1, 2, np.inf]), + (HalfMultinomialLoss(), [], [-np.inf, -1, 1.1, np.inf]), +] +# y_pred and y_true do not always have the same domain (valid value range). +# Hence, we define extra sets of parameters for each of them. 
+Y_TRUE_PARAMS = [ # type: ignore + # (loss, [y success], [y fail]) + (HalfPoissonLoss(), [0], []), + (HuberLoss(), [0], []), + (HalfTweedieLoss(power=-3), [-100, -0.1, 0], []), + (HalfTweedieLoss(power=0), [-100, 0], []), + (HalfTweedieLoss(power=1.5), [0], []), + (HalfTweedieLossIdentity(power=-3), [-100, -0.1, 0], []), + (HalfTweedieLossIdentity(power=0), [-100, 0], []), + (HalfTweedieLossIdentity(power=1.5), [0], []), + (HalfBinomialLoss(), [0, 1], []), + (HalfMultinomialLoss(), [0.0, 1.0, 2], []), +] +Y_PRED_PARAMS = [ + # (loss, [y success], [y fail]) + (HalfPoissonLoss(), [], [0]), + (HalfTweedieLoss(power=-3), [], [-3, -0.1, 0]), + (HalfTweedieLoss(power=0), [], [-3, -0.1, 0]), + (HalfTweedieLoss(power=1.5), [], [0]), + (HalfTweedieLossIdentity(power=-3), [], [-3, -0.1, 0]), + (HalfTweedieLossIdentity(power=0), [-3, -0.1, 0], []), + (HalfTweedieLossIdentity(power=1.5), [], [0]), + (HalfBinomialLoss(), [], [0, 1]), + (HalfMultinomialLoss(), [0.1, 0.5], [0, 1]), +] + + +@pytest.mark.parametrize( + "loss, y_true_success, y_true_fail", Y_COMMON_PARAMS + Y_TRUE_PARAMS +) +def test_loss_boundary_y_true(loss, y_true_success, y_true_fail): + """Test boundaries of y_true for loss functions.""" + for y in y_true_success: + assert loss.in_y_true_range(np.array([y])) + for y in y_true_fail: + assert not loss.in_y_true_range(np.array([y])) + + +@pytest.mark.parametrize( + "loss, y_pred_success, y_pred_fail", Y_COMMON_PARAMS + Y_PRED_PARAMS # type: ignore +) +def test_loss_boundary_y_pred(loss, y_pred_success, y_pred_fail): + """Test boundaries of y_pred for loss functions.""" + for y in y_pred_success: + assert loss.in_y_pred_range(np.array([y])) + for y in y_pred_fail: + assert not loss.in_y_pred_range(np.array([y])) + + +@pytest.mark.parametrize( + "loss, y_true, raw_prediction, loss_true, gradient_true, hessian_true", + [ + (HalfSquaredError(), 1.0, 5.0, 8, 4, 1), + (AbsoluteError(), 1.0, 5.0, 4.0, 1.0, None), + (PinballLoss(quantile=0.5), 1.0, 5.0, 2, 0.5, None), + 
(PinballLoss(quantile=0.25), 1.0, 5.0, 4 * (1 - 0.25), 1 - 0.25, None), + (PinballLoss(quantile=0.25), 5.0, 1.0, 4 * 0.25, -0.25, None), + (HuberLoss(quantile=0.5, delta=3), 1.0, 5.0, 3 * (4 - 3 / 2), None, None), + (HuberLoss(quantile=0.5, delta=3), 1.0, 3.0, 0.5 * 2**2, None, None), + (HalfPoissonLoss(), 2.0, np.log(4), 4 - 2 * np.log(4), 4 - 2, 4), + (HalfGammaLoss(), 2.0, np.log(4), np.log(4) + 2 / 4, 1 - 2 / 4, 2 / 4), + (HalfTweedieLoss(power=3), 2.0, np.log(4), -1 / 4 + 1 / 4**2, None, None), + (HalfTweedieLossIdentity(power=1), 2.0, 4.0, 2 - 2 * np.log(2), None, None), + (HalfTweedieLossIdentity(power=2), 2.0, 4.0, np.log(2) - 1 / 2, None, None), + ( + HalfTweedieLossIdentity(power=3), + 2.0, + 4.0, + -1 / 4 + 1 / 4**2 + 1 / 2 / 2, + None, + None, + ), + ( + HalfBinomialLoss(), + 0.25, + np.log(4), + np.log1p(4) - 0.25 * np.log(4), + None, + None, + ), + # Extreme log loss cases, checked with mpmath: + # import mpmath as mp + # + # # Stolen from scipy + # def mpf2float(x): + # return float(mp.nstr(x, 17, min_fixed=0, max_fixed=0)) + # + # def mp_logloss(y_true, raw): + # with mp.workdps(100): + # y_true, raw = mp.mpf(float(y_true)), mp.mpf(float(raw)) + # out = mp.log1p(mp.exp(raw)) - y_true * raw + # return mpf2float(out) + # + # def mp_gradient(y_true, raw): + # with mp.workdps(100): + # y_true, raw = mp.mpf(float(y_true)), mp.mpf(float(raw)) + # out = mp.mpf(1) / (mp.mpf(1) + mp.exp(-raw)) - y_true + # return mpf2float(out) + # + # def mp_hessian(y_true, raw): + # with mp.workdps(100): + # y_true, raw = mp.mpf(float(y_true)), mp.mpf(float(raw)) + # p = mp.mpf(1) / (mp.mpf(1) + mp.exp(-raw)) + # out = p * (mp.mpf(1) - p) + # return mpf2float(out) + # + # y, raw = 0.0, 37. 
+ # mp_logloss(y, raw), mp_gradient(y, raw), mp_hessian(y, raw) + (HalfBinomialLoss(), 0.0, -1e20, 0, 0, 0), + (HalfBinomialLoss(), 1.0, -1e20, 1e20, -1, 0), + (HalfBinomialLoss(), 0.0, -1e3, 0, 0, 0), + (HalfBinomialLoss(), 1.0, -1e3, 1e3, -1, 0), + (HalfBinomialLoss(), 1.0, -37.5, 37.5, -1, 0), + (HalfBinomialLoss(), 1.0, -37.0, 37, 1e-16 - 1, 8.533047625744065e-17), + (HalfBinomialLoss(), 0.0, -37.0, *[8.533047625744065e-17] * 3), + (HalfBinomialLoss(), 1.0, -36.9, 36.9, 1e-16 - 1, 9.430476078526806e-17), + (HalfBinomialLoss(), 0.0, -36.9, *[9.430476078526806e-17] * 3), + (HalfBinomialLoss(), 0.0, 37.0, 37, 1 - 1e-16, 8.533047625744065e-17), + (HalfBinomialLoss(), 1.0, 37.0, *[8.533047625744066e-17] * 3), + (HalfBinomialLoss(), 0.0, 37.5, 37.5, 1, 5.175555005801868e-17), + (HalfBinomialLoss(), 0.0, 232.8, 232.8, 1, 1.4287342391028437e-101), + (HalfBinomialLoss(), 1.0, 1e20, 0, 0, 0), + (HalfBinomialLoss(), 0.0, 1e20, 1e20, 1, 0), + ( + HalfBinomialLoss(), + 1.0, + 232.8, + 0, + -1.4287342391028437e-101, + 1.4287342391028437e-101, + ), + (HalfBinomialLoss(), 1.0, 232.9, 0, 0, 0), + (HalfBinomialLoss(), 1.0, 1e3, 0, 0, 0), + (HalfBinomialLoss(), 0.0, 1e3, 1e3, 1, 0), + ( + HalfMultinomialLoss(n_classes=3), + 0.0, + [0.2, 0.5, 0.3], + logsumexp([0.2, 0.5, 0.3]) - 0.2, + None, + None, + ), + ( + HalfMultinomialLoss(n_classes=3), + 1.0, + [0.2, 0.5, 0.3], + logsumexp([0.2, 0.5, 0.3]) - 0.5, + None, + None, + ), + ( + HalfMultinomialLoss(n_classes=3), + 2.0, + [0.2, 0.5, 0.3], + logsumexp([0.2, 0.5, 0.3]) - 0.3, + None, + None, + ), + ( + HalfMultinomialLoss(n_classes=3), + 2.0, + [1e4, 0, 7e-7], + logsumexp([1e4, 0, 7e-7]) - (7e-7), + None, + None, + ), + ], + ids=loss_instance_name, +) +def test_loss_on_specific_values( + loss, y_true, raw_prediction, loss_true, gradient_true, hessian_true +): + """Test losses, gradients and hessians at specific values.""" + loss1 = loss(y_true=np.array([y_true]), raw_prediction=np.array([raw_prediction])) + grad1 = loss.gradient( + 
y_true=np.array([y_true]), raw_prediction=np.array([raw_prediction]) + ) + loss2, grad2 = loss.loss_gradient( + y_true=np.array([y_true]), raw_prediction=np.array([raw_prediction]) + ) + grad3, hess = loss.gradient_hessian( + y_true=np.array([y_true]), raw_prediction=np.array([raw_prediction]) + ) + + assert loss1 == approx(loss_true, rel=1e-15, abs=1e-15) + assert loss2 == approx(loss_true, rel=1e-15, abs=1e-15) + + if gradient_true is not None: + assert grad1 == approx(gradient_true, rel=1e-15, abs=1e-15) + assert grad2 == approx(gradient_true, rel=1e-15, abs=1e-15) + assert grad3 == approx(gradient_true, rel=1e-15, abs=1e-15) + + if hessian_true is not None: + assert hess == approx(hessian_true, rel=1e-15, abs=1e-15) + + +@pytest.mark.parametrize("loss", ALL_LOSSES) +@pytest.mark.parametrize("readonly_memmap", [False, True]) +@pytest.mark.parametrize("dtype_in", [np.float32, np.float64]) +@pytest.mark.parametrize("dtype_out", [np.float32, np.float64]) +@pytest.mark.parametrize("sample_weight", [None, 1]) +@pytest.mark.parametrize("out1", [None, 1]) +@pytest.mark.parametrize("out2", [None, 1]) +@pytest.mark.parametrize("n_threads", [1, 2]) +def test_loss_dtype( + loss, readonly_memmap, dtype_in, dtype_out, sample_weight, out1, out2, n_threads +): + """Test acceptance of dtypes, readonly and writeable arrays in loss functions. + + Check that loss accepts if all input arrays are either all float32 or all + float64, and all output arrays are either all float32 or all float64. + + Also check that input arrays can be readonly, e.g. memory mapped. 
+ """ + if _IS_WASM and readonly_memmap: # pragma: nocover + pytest.xfail(reason="memmap not fully supported") + + loss = loss() + # generate a y_true and raw_prediction in valid range + n_samples = 5 + y_true, raw_prediction = random_y_true_raw_prediction( + loss=loss, + n_samples=n_samples, + y_bound=(-100, 100), + raw_bound=(-10, 10), + seed=42, + ) + y_true = y_true.astype(dtype_in) + raw_prediction = raw_prediction.astype(dtype_in) + + if sample_weight is not None: + sample_weight = np.array([2.0] * n_samples, dtype=dtype_in) + if out1 is not None: + out1 = np.empty_like(y_true, dtype=dtype_out) + if out2 is not None: + out2 = np.empty_like(raw_prediction, dtype=dtype_out) + + if readonly_memmap: + y_true = create_memmap_backed_data(y_true) + raw_prediction = create_memmap_backed_data(raw_prediction) + if sample_weight is not None: + sample_weight = create_memmap_backed_data(sample_weight) + + loss.loss( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + loss_out=out1, + n_threads=n_threads, + ) + loss.gradient( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + gradient_out=out2, + n_threads=n_threads, + ) + loss.loss_gradient( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + loss_out=out1, + gradient_out=out2, + n_threads=n_threads, + ) + if out1 is not None and loss.is_multiclass: + out1 = np.empty_like(raw_prediction, dtype=dtype_out) + loss.gradient_hessian( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + gradient_out=out1, + hessian_out=out2, + n_threads=n_threads, + ) + loss(y_true=y_true, raw_prediction=raw_prediction, sample_weight=sample_weight) + loss.fit_intercept_only(y_true=y_true, sample_weight=sample_weight) + loss.constant_to_optimal_zero(y_true=y_true, sample_weight=sample_weight) + if hasattr(loss, "predict_proba"): + loss.predict_proba(raw_prediction=raw_prediction) + if hasattr(loss, "gradient_proba"): + 
loss.gradient_proba( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + gradient_out=out1, + proba_out=out2, + n_threads=n_threads, + ) + + +@pytest.mark.parametrize("loss", LOSS_INSTANCES, ids=loss_instance_name) +@pytest.mark.parametrize("sample_weight", [None, "range"]) +def test_loss_same_as_C_functions(loss, sample_weight): + """Test that Python and Cython functions return same results.""" + y_true, raw_prediction = random_y_true_raw_prediction( + loss=loss, + n_samples=20, + y_bound=(-100, 100), + raw_bound=(-10, 10), + seed=42, + ) + if sample_weight == "range": + sample_weight = np.linspace(1, y_true.shape[0], num=y_true.shape[0]) + + out_l1 = np.empty_like(y_true) + out_l2 = np.empty_like(y_true) + out_g1 = np.empty_like(raw_prediction) + out_g2 = np.empty_like(raw_prediction) + out_h1 = np.empty_like(raw_prediction) + out_h2 = np.empty_like(raw_prediction) + loss.loss( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + loss_out=out_l1, + ) + loss.closs.loss( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + loss_out=out_l2, + ), + assert_allclose(out_l1, out_l2) + loss.gradient( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + gradient_out=out_g1, + ) + loss.closs.gradient( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + gradient_out=out_g2, + ) + assert_allclose(out_g1, out_g2) + loss.closs.loss_gradient( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + loss_out=out_l1, + gradient_out=out_g1, + ) + loss.closs.loss_gradient( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + loss_out=out_l2, + gradient_out=out_g2, + ) + assert_allclose(out_l1, out_l2) + assert_allclose(out_g1, out_g2) + loss.gradient_hessian( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + gradient_out=out_g1, + 
hessian_out=out_h1, + ) + loss.closs.gradient_hessian( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + gradient_out=out_g2, + hessian_out=out_h2, + ) + assert_allclose(out_g1, out_g2) + assert_allclose(out_h1, out_h2) + + +@pytest.mark.parametrize("loss", LOSS_INSTANCES, ids=loss_instance_name) +@pytest.mark.parametrize("sample_weight", [None, "range"]) +def test_loss_gradients_are_the_same(loss, sample_weight, global_random_seed): + """Test that loss and gradient are the same across different functions. + + Also test that output arguments contain correct results. + """ + y_true, raw_prediction = random_y_true_raw_prediction( + loss=loss, + n_samples=20, + y_bound=(-100, 100), + raw_bound=(-10, 10), + seed=global_random_seed, + ) + if sample_weight == "range": + sample_weight = np.linspace(1, y_true.shape[0], num=y_true.shape[0]) + + out_l1 = np.empty_like(y_true) + out_l2 = np.empty_like(y_true) + out_g1 = np.empty_like(raw_prediction) + out_g2 = np.empty_like(raw_prediction) + out_g3 = np.empty_like(raw_prediction) + out_h3 = np.empty_like(raw_prediction) + + l1 = loss.loss( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + loss_out=out_l1, + ) + g1 = loss.gradient( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + gradient_out=out_g1, + ) + l2, g2 = loss.loss_gradient( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + loss_out=out_l2, + gradient_out=out_g2, + ) + g3, h3 = loss.gradient_hessian( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + gradient_out=out_g3, + hessian_out=out_h3, + ) + assert_allclose(l1, l2) + assert_array_equal(l1, out_l1) + assert np.shares_memory(l1, out_l1) + assert_array_equal(l2, out_l2) + assert np.shares_memory(l2, out_l2) + assert_allclose(g1, g2) + assert_allclose(g1, g3) + assert_array_equal(g1, out_g1) + assert np.shares_memory(g1, out_g1) + 
assert_array_equal(g2, out_g2) + assert np.shares_memory(g2, out_g2) + assert_array_equal(g3, out_g3) + assert np.shares_memory(g3, out_g3) + + if hasattr(loss, "gradient_proba"): + assert loss.is_multiclass # only for HalfMultinomialLoss + out_g4 = np.empty_like(raw_prediction) + out_proba = np.empty_like(raw_prediction) + g4, proba = loss.gradient_proba( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + gradient_out=out_g4, + proba_out=out_proba, + ) + assert_allclose(g1, out_g4) + assert_allclose(g1, g4) + assert_allclose(proba, out_proba) + assert_allclose(np.sum(proba, axis=1), 1, rtol=1e-11) + + +@pytest.mark.parametrize("loss", LOSS_INSTANCES, ids=loss_instance_name) +@pytest.mark.parametrize("sample_weight", ["ones", "random"]) +def test_sample_weight_multiplies(loss, sample_weight, global_random_seed): + """Test sample weights in loss, gradients and hessians. + + Make sure that passing sample weights to loss, gradient and hessian + computation methods is equivalent to multiplying by the weights. 
+ """ + n_samples = 100 + y_true, raw_prediction = random_y_true_raw_prediction( + loss=loss, + n_samples=n_samples, + y_bound=(-100, 100), + raw_bound=(-5, 5), + seed=global_random_seed, + ) + + if sample_weight == "ones": + sample_weight = np.ones(shape=n_samples, dtype=np.float64) + else: + rng = np.random.RandomState(global_random_seed) + sample_weight = rng.normal(size=n_samples).astype(np.float64) + + assert_allclose( + loss.loss( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + ), + sample_weight + * loss.loss( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=None, + ), + ) + + losses, gradient = loss.loss_gradient( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=None, + ) + losses_sw, gradient_sw = loss.loss_gradient( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + ) + assert_allclose(losses * sample_weight, losses_sw) + if not loss.is_multiclass: + assert_allclose(gradient * sample_weight, gradient_sw) + else: + assert_allclose(gradient * sample_weight[:, None], gradient_sw) + + gradient, hessian = loss.gradient_hessian( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=None, + ) + gradient_sw, hessian_sw = loss.gradient_hessian( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + ) + if not loss.is_multiclass: + assert_allclose(gradient * sample_weight, gradient_sw) + assert_allclose(hessian * sample_weight, hessian_sw) + else: + assert_allclose(gradient * sample_weight[:, None], gradient_sw) + assert_allclose(hessian * sample_weight[:, None], hessian_sw) + + +@pytest.mark.parametrize("loss", LOSS_INSTANCES, ids=loss_instance_name) +def test_graceful_squeezing(loss): + """Test that reshaped raw_prediction gives same results.""" + y_true, raw_prediction = random_y_true_raw_prediction( + loss=loss, + n_samples=20, + y_bound=(-100, 100), + raw_bound=(-10, 10), + seed=42, + ) + + if raw_prediction.ndim == 1: + 
+        raw_prediction_2d = raw_prediction[:, None]
+        assert_allclose(
+            loss.loss(y_true=y_true, raw_prediction=raw_prediction_2d),
+            loss.loss(y_true=y_true, raw_prediction=raw_prediction),
+        )
+        assert_allclose(
+            loss.loss_gradient(y_true=y_true, raw_prediction=raw_prediction_2d),
+            loss.loss_gradient(y_true=y_true, raw_prediction=raw_prediction),
+        )
+        assert_allclose(
+            loss.gradient(y_true=y_true, raw_prediction=raw_prediction_2d),
+            loss.gradient(y_true=y_true, raw_prediction=raw_prediction),
+        )
+        assert_allclose(
+            loss.gradient_hessian(y_true=y_true, raw_prediction=raw_prediction_2d),
+            loss.gradient_hessian(y_true=y_true, raw_prediction=raw_prediction),
+        )
+
+
+@pytest.mark.parametrize("loss", LOSS_INSTANCES, ids=loss_instance_name)
+@pytest.mark.parametrize("sample_weight", [None, "range"])
+def test_loss_of_perfect_prediction(loss, sample_weight):
+    """Test value of perfect predictions.
+
+    Loss of y_pred = y_true plus constant_to_optimal_zero should sum up to
+    zero.
+    """
+    if not loss.is_multiclass:
+        # Use small values such that exp(value) is not nan.
+ raw_prediction = np.array([-10, -0.1, 0, 0.1, 3, 10]) + # If link is identity, we must respect the interval of y_pred: + if isinstance(loss.link, IdentityLink): + eps = 1e-10 + low = loss.interval_y_pred.low + if not loss.interval_y_pred.low_inclusive: + low = low + eps + high = loss.interval_y_pred.high + if not loss.interval_y_pred.high_inclusive: + high = high - eps + raw_prediction = np.clip(raw_prediction, low, high) + y_true = loss.link.inverse(raw_prediction) + else: + # HalfMultinomialLoss + y_true = np.arange(loss.n_classes).astype(float) + # raw_prediction with entries -exp(10), but +exp(10) on the diagonal + # this is close enough to np.inf which would produce nan + raw_prediction = np.full( + shape=(loss.n_classes, loss.n_classes), + fill_value=-np.exp(10), + dtype=float, + ) + raw_prediction.flat[:: loss.n_classes + 1] = np.exp(10) + + if sample_weight == "range": + sample_weight = np.linspace(1, y_true.shape[0], num=y_true.shape[0]) + + loss_value = loss.loss( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + ) + constant_term = loss.constant_to_optimal_zero( + y_true=y_true, sample_weight=sample_weight + ) + # Comparing loss_value + constant_term to zero would result in large + # round-off errors. + assert_allclose(loss_value, -constant_term, atol=1e-14, rtol=1e-15) + + +@pytest.mark.parametrize("loss", LOSS_INSTANCES, ids=loss_instance_name) +@pytest.mark.parametrize("sample_weight", [None, "range"]) +def test_gradients_hessians_numerically(loss, sample_weight, global_random_seed): + """Test gradients and hessians with numerical derivatives. + + Gradient should equal the numerical derivatives of the loss function. + Hessians should equal the numerical derivatives of gradients. 
+ """ + n_samples = 20 + y_true, raw_prediction = random_y_true_raw_prediction( + loss=loss, + n_samples=n_samples, + y_bound=(-100, 100), + raw_bound=(-5, 5), + seed=global_random_seed, + ) + + if sample_weight == "range": + sample_weight = np.linspace(1, y_true.shape[0], num=y_true.shape[0]) + + g, h = loss.gradient_hessian( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + ) + + assert g.shape == raw_prediction.shape + assert h.shape == raw_prediction.shape + + if not loss.is_multiclass: + + def loss_func(x): + return loss.loss( + y_true=y_true, + raw_prediction=x, + sample_weight=sample_weight, + ) + + g_numeric = numerical_derivative(loss_func, raw_prediction, eps=1e-6) + assert_allclose(g, g_numeric, rtol=5e-6, atol=1e-10) + + def grad_func(x): + return loss.gradient( + y_true=y_true, + raw_prediction=x, + sample_weight=sample_weight, + ) + + h_numeric = numerical_derivative(grad_func, raw_prediction, eps=1e-6) + if loss.approx_hessian: + # TODO: What could we test if loss.approx_hessian? + pass + else: + assert_allclose(h, h_numeric, rtol=5e-6, atol=1e-10) + else: + # For multiclass loss, we should only change the predictions of the + # class for which the derivative is taken for, e.g. offset[:, k] = eps + # for class k. + # As a softmax is computed, offsetting the whole array by a constant + # would have no effect on the probabilities, and thus on the loss. 
+ for k in range(loss.n_classes): + + def loss_func(x): + raw = raw_prediction.copy() + raw[:, k] = x + return loss.loss( + y_true=y_true, + raw_prediction=raw, + sample_weight=sample_weight, + ) + + g_numeric = numerical_derivative(loss_func, raw_prediction[:, k], eps=1e-5) + assert_allclose(g[:, k], g_numeric, rtol=5e-6, atol=1e-10) + + def grad_func(x): + raw = raw_prediction.copy() + raw[:, k] = x + return loss.gradient( + y_true=y_true, + raw_prediction=raw, + sample_weight=sample_weight, + )[:, k] + + h_numeric = numerical_derivative(grad_func, raw_prediction[:, k], eps=1e-6) + if loss.approx_hessian: + # TODO: What could we test if loss.approx_hessian? + pass + else: + assert_allclose(h[:, k], h_numeric, rtol=5e-6, atol=1e-10) + + +@pytest.mark.parametrize( + "loss, x0, y_true", + [ + ("squared_error", -2.0, 42), + ("squared_error", 117.0, 1.05), + ("squared_error", 0.0, 0.0), + # The argmin of binomial_loss for y_true=0 and y_true=1 is resp. + # -inf and +inf due to logit, cf. "complete separation". Therefore, we + # use 0 < y_true < 1. + ("binomial_loss", 0.3, 0.1), + ("binomial_loss", -12, 0.2), + ("binomial_loss", 30, 0.9), + ("poisson_loss", 12.0, 1.0), + ("poisson_loss", 0.0, 2.0), + ("poisson_loss", -22.0, 10.0), + ], +) +@skip_if_32bit +def test_derivatives(loss, x0, y_true): + """Test that gradients are zero at the minimum of the loss. + + We check this on a single value/sample using Halley's method with the + first and second order derivatives computed by the Loss instance. + Note that methods of Loss instances operate on arrays while the newton + root finder expects a scalar or a one-element array for this purpose. + """ + loss = _LOSSES[loss](sample_weight=None) + y_true = np.array([y_true], dtype=np.float64) + x0 = np.array([x0], dtype=np.float64) + + def func(x: np.ndarray) -> np.ndarray: + """Compute loss plus constant term. + + The constant term is such that the minimum function value is zero, + which is required by the Newton method. 
+ """ + return loss.loss( + y_true=y_true, raw_prediction=x + ) + loss.constant_to_optimal_zero(y_true=y_true) + + def fprime(x: np.ndarray) -> np.ndarray: + return loss.gradient(y_true=y_true, raw_prediction=x) + + def fprime2(x: np.ndarray) -> np.ndarray: + return loss.gradient_hessian(y_true=y_true, raw_prediction=x)[1] + + optimum = newton( + func, + x0=x0, + fprime=fprime, + fprime2=fprime2, + maxiter=100, + tol=5e-8, + ) + + # Need to ravel arrays because assert_allclose requires matching + # dimensions. + y_true = y_true.ravel() + optimum = optimum.ravel() + assert_allclose(loss.link.inverse(optimum), y_true) + assert_allclose(func(optimum), 0, atol=1e-14) + assert_allclose(loss.gradient(y_true=y_true, raw_prediction=optimum), 0, atol=5e-7) + + +@pytest.mark.parametrize("loss", LOSS_INSTANCES, ids=loss_instance_name) +@pytest.mark.parametrize("sample_weight", [None, "range"]) +def test_loss_intercept_only(loss, sample_weight): + """Test that fit_intercept_only returns the argmin of the loss. + + Also test that the gradient is zero at the minimum. 
+ """ + n_samples = 50 + if not loss.is_multiclass: + y_true = loss.link.inverse(np.linspace(-4, 4, num=n_samples)) + else: + y_true = np.arange(n_samples).astype(np.float64) % loss.n_classes + y_true[::5] = 0 # exceedance of class 0 + + if sample_weight == "range": + sample_weight = np.linspace(0.1, 2, num=n_samples) + + a = loss.fit_intercept_only(y_true=y_true, sample_weight=sample_weight) + + # find minimum by optimization + def fun(x): + if not loss.is_multiclass: + raw_prediction = np.full(shape=(n_samples), fill_value=x) + else: + raw_prediction = np.ascontiguousarray( + np.broadcast_to(x, shape=(n_samples, loss.n_classes)) + ) + return loss( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + ) + + if not loss.is_multiclass: + opt = minimize_scalar(fun, tol=1e-7, options={"maxiter": 100}) + grad = loss.gradient( + y_true=y_true, + raw_prediction=np.full_like(y_true, a), + sample_weight=sample_weight, + ) + assert a.shape == tuple() # scalar + assert a.dtype == y_true.dtype + assert_all_finite(a) + a == approx(opt.x, rel=1e-7) + grad.sum() == approx(0, abs=1e-12) + else: + # The constraint corresponds to sum(raw_prediction) = 0. Without it, we would + # need to apply loss.symmetrize_raw_prediction to opt.x before comparing. 
+ opt = minimize( + fun, + np.zeros((loss.n_classes)), + tol=1e-13, + options={"maxiter": 100}, + method="SLSQP", + constraints=LinearConstraint(np.ones((1, loss.n_classes)), 0, 0), + ) + grad = loss.gradient( + y_true=y_true, + raw_prediction=np.tile(a, (n_samples, 1)), + sample_weight=sample_weight, + ) + assert a.dtype == y_true.dtype + assert_all_finite(a) + assert_allclose(a, opt.x, rtol=5e-6, atol=1e-12) + assert_allclose(grad.sum(axis=0), 0, atol=1e-12) + + +@pytest.mark.parametrize( + "loss, func, random_dist", + [ + (HalfSquaredError(), np.mean, "normal"), + (AbsoluteError(), np.median, "normal"), + (PinballLoss(quantile=0.25), lambda x: np.percentile(x, q=25), "normal"), + (HalfPoissonLoss(), np.mean, "poisson"), + (HalfGammaLoss(), np.mean, "exponential"), + (HalfTweedieLoss(), np.mean, "exponential"), + (HalfBinomialLoss(), np.mean, "binomial"), + ], +) +def test_specific_fit_intercept_only(loss, func, random_dist, global_random_seed): + """Test that fit_intercept_only returns the correct functional. + + We test the functional for specific, meaningful distributions, e.g. + squared error estimates the expectation of a probability distribution. + """ + rng = np.random.RandomState(global_random_seed) + if random_dist == "binomial": + y_train = rng.binomial(1, 0.5, size=100) + else: + y_train = getattr(rng, random_dist)(size=100) + baseline_prediction = loss.fit_intercept_only(y_true=y_train) + # Make sure baseline prediction is the expected functional=func, e.g. mean + # or median. 
+ assert_all_finite(baseline_prediction) + assert baseline_prediction == approx(loss.link.link(func(y_train))) + assert loss.link.inverse(baseline_prediction) == approx(func(y_train)) + if isinstance(loss, IdentityLink): + assert_allclose(loss.link.inverse(baseline_prediction), baseline_prediction) + + # Test baseline at boundary + if loss.interval_y_true.low_inclusive: + y_train.fill(loss.interval_y_true.low) + baseline_prediction = loss.fit_intercept_only(y_true=y_train) + assert_all_finite(baseline_prediction) + if loss.interval_y_true.high_inclusive: + y_train.fill(loss.interval_y_true.high) + baseline_prediction = loss.fit_intercept_only(y_true=y_train) + assert_all_finite(baseline_prediction) + + +def test_multinomial_loss_fit_intercept_only(): + """Test that fit_intercept_only returns the mean functional for CCE.""" + rng = np.random.RandomState(0) + n_classes = 4 + loss = HalfMultinomialLoss(n_classes=n_classes) + # Same logic as test_specific_fit_intercept_only. Here inverse link + # function = softmax and link function = log - symmetry term. 
+ y_train = rng.randint(0, n_classes + 1, size=100).astype(np.float64) + baseline_prediction = loss.fit_intercept_only(y_true=y_train) + assert baseline_prediction.shape == (n_classes,) + p = np.zeros(n_classes, dtype=y_train.dtype) + for k in range(n_classes): + p[k] = (y_train == k).mean() + assert_allclose(baseline_prediction, np.log(p) - np.mean(np.log(p))) + assert_allclose(baseline_prediction[None, :], loss.link.link(p[None, :])) + + for y_train in (np.zeros(shape=10), np.ones(shape=10)): + y_train = y_train.astype(np.float64) + baseline_prediction = loss.fit_intercept_only(y_true=y_train) + assert baseline_prediction.dtype == y_train.dtype + assert_all_finite(baseline_prediction) + + +def test_binomial_and_multinomial_loss(global_random_seed): + """Test that multinomial loss with n_classes = 2 is the same as binomial loss.""" + rng = np.random.RandomState(global_random_seed) + n_samples = 20 + binom = HalfBinomialLoss() + multinom = HalfMultinomialLoss(n_classes=2) + y_train = rng.randint(0, 2, size=n_samples).astype(np.float64) + raw_prediction = rng.normal(size=n_samples) + raw_multinom = np.empty((n_samples, 2)) + raw_multinom[:, 0] = -0.5 * raw_prediction + raw_multinom[:, 1] = 0.5 * raw_prediction + assert_allclose( + binom.loss(y_true=y_train, raw_prediction=raw_prediction), + multinom.loss(y_true=y_train, raw_prediction=raw_multinom), + ) + + +@pytest.mark.parametrize("y_true", (np.array([0.0, 0, 0]), np.array([1.0, 1, 1]))) +@pytest.mark.parametrize("y_pred", (np.array([-5.0, -5, -5]), np.array([3.0, 3, 3]))) +def test_binomial_vs_alternative_formulation(y_true, y_pred, global_dtype): + """Test that both formulations of the binomial deviance agree. + + Often, the binomial deviance or log loss is written in terms of a variable + z in {-1, +1}, but we use y in {0, 1}, hence z = 2 * y - 1. + ESL II Eq. (10.18): + + -loglike(z, f) = log(1 + exp(-2 * z * f)) + + Note: + - ESL 2*f = raw_prediction, hence the factor 2 of ESL disappears. 
+ - Deviance = -2*loglike + .., but HalfBinomialLoss is half of the + deviance, hence the factor of 2 cancels in the comparison. + """ + + def alt_loss(y, raw_pred): + z = 2 * y - 1 + return np.mean(np.log(1 + np.exp(-z * raw_pred))) + + def alt_gradient(y, raw_pred): + # alternative gradient formula according to ESL + z = 2 * y - 1 + return -z / (1 + np.exp(z * raw_pred)) + + bin_loss = HalfBinomialLoss() + + y_true = y_true.astype(global_dtype) + y_pred = y_pred.astype(global_dtype) + datum = (y_true, y_pred) + + assert bin_loss(*datum) == approx(alt_loss(*datum)) + assert_allclose(bin_loss.gradient(*datum), alt_gradient(*datum)) + + +@pytest.mark.parametrize("loss", LOSS_INSTANCES, ids=loss_instance_name) +def test_predict_proba(loss, global_random_seed): + """Test that predict_proba and gradient_proba work as expected.""" + n_samples = 20 + y_true, raw_prediction = random_y_true_raw_prediction( + loss=loss, + n_samples=n_samples, + y_bound=(-100, 100), + raw_bound=(-5, 5), + seed=global_random_seed, + ) + + if hasattr(loss, "predict_proba"): + proba = loss.predict_proba(raw_prediction) + assert proba.shape == (n_samples, loss.n_classes) + assert np.sum(proba, axis=1) == approx(1, rel=1e-11) + + if hasattr(loss, "gradient_proba"): + for grad, proba in ( + (None, None), + (None, np.empty_like(raw_prediction)), + (np.empty_like(raw_prediction), None), + (np.empty_like(raw_prediction), np.empty_like(raw_prediction)), + ): + grad, proba = loss.gradient_proba( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=None, + gradient_out=grad, + proba_out=proba, + ) + assert proba.shape == (n_samples, loss.n_classes) + assert np.sum(proba, axis=1) == approx(1, rel=1e-11) + assert_allclose( + grad, + loss.gradient( + y_true=y_true, + raw_prediction=raw_prediction, + sample_weight=None, + gradient_out=None, + ), + ) + + +@pytest.mark.parametrize("loss", ALL_LOSSES) +@pytest.mark.parametrize("sample_weight", [None, "range"]) +@pytest.mark.parametrize("dtype", 
(np.float32, np.float64)) +@pytest.mark.parametrize("order", ("C", "F")) +def test_init_gradient_and_hessians(loss, sample_weight, dtype, order): + """Test that init_gradient_and_hessian works as expected. + + passing sample_weight to a loss correctly influences the constant_hessian + attribute, and consequently the shape of the hessian array. + """ + n_samples = 5 + if sample_weight == "range": + sample_weight = np.ones(n_samples) + loss = loss(sample_weight=sample_weight) + gradient, hessian = loss.init_gradient_and_hessian( + n_samples=n_samples, + dtype=dtype, + order=order, + ) + if loss.constant_hessian: + assert gradient.shape == (n_samples,) + assert hessian.shape == (1,) + elif loss.is_multiclass: + assert gradient.shape == (n_samples, loss.n_classes) + assert hessian.shape == (n_samples, loss.n_classes) + else: + assert hessian.shape == (n_samples,) + assert hessian.shape == (n_samples,) + + assert gradient.dtype == dtype + assert hessian.dtype == dtype + + if order == "C": + assert gradient.flags.c_contiguous + assert hessian.flags.c_contiguous + else: + assert gradient.flags.f_contiguous + assert hessian.flags.f_contiguous + + +@pytest.mark.parametrize("loss", ALL_LOSSES) +@pytest.mark.parametrize( + "params, err_msg", + [ + ( + {"dtype": np.int64}, + f"Valid options for 'dtype' are .* Got dtype={np.int64} instead.", + ), + ], +) +def test_init_gradient_and_hessian_raises(loss, params, err_msg): + """Test that init_gradient_and_hessian raises errors for invalid input.""" + loss = loss() + with pytest.raises((ValueError, TypeError), match=err_msg): + gradient, hessian = loss.init_gradient_and_hessian(n_samples=5, **params) + + +@pytest.mark.parametrize( + "loss, params, err_type, err_msg", + [ + ( + PinballLoss, + {"quantile": None}, + TypeError, + "quantile must be an instance of float, not NoneType.", + ), + ( + PinballLoss, + {"quantile": 0}, + ValueError, + "quantile == 0, must be > 0.", + ), + (PinballLoss, {"quantile": 1.1}, ValueError, "quantile 
== 1.1, must be < 1."), + ( + HuberLoss, + {"quantile": None}, + TypeError, + "quantile must be an instance of float, not NoneType.", + ), + ( + HuberLoss, + {"quantile": 0}, + ValueError, + "quantile == 0, must be > 0.", + ), + (HuberLoss, {"quantile": 1.1}, ValueError, "quantile == 1.1, must be < 1."), + ], +) +def test_loss_init_parameter_validation(loss, params, err_type, err_msg): + """Test that loss raises errors for invalid input.""" + with pytest.raises(err_type, match=err_msg): + loss(**params) + + +@pytest.mark.parametrize("loss", LOSS_INSTANCES, ids=loss_instance_name) +def test_loss_pickle(loss): + """Test that losses can be pickled.""" + n_samples = 20 + y_true, raw_prediction = random_y_true_raw_prediction( + loss=loss, + n_samples=n_samples, + y_bound=(-100, 100), + raw_bound=(-5, 5), + seed=42, + ) + pickled_loss = pickle.dumps(loss) + unpickled_loss = pickle.loads(pickled_loss) + assert loss(y_true=y_true, raw_prediction=raw_prediction) == approx( + unpickled_loss(y_true=y_true, raw_prediction=raw_prediction) + ) + + +@pytest.mark.parametrize("p", [-1.5, 0, 1, 1.5, 2, 3]) +def test_tweedie_log_identity_consistency(p): + """Test for identical losses when only the link function is different.""" + half_tweedie_log = HalfTweedieLoss(power=p) + half_tweedie_identity = HalfTweedieLossIdentity(power=p) + n_samples = 10 + y_true, raw_prediction = random_y_true_raw_prediction( + loss=half_tweedie_log, n_samples=n_samples, seed=42 + ) + y_pred = half_tweedie_log.link.inverse(raw_prediction) # exp(raw_prediction) + + # Let's compare the loss values, up to some constant term that is dropped + # in HalfTweedieLoss but not in HalfTweedieLossIdentity. 
+ loss_log = half_tweedie_log.loss( + y_true=y_true, raw_prediction=raw_prediction + ) + half_tweedie_log.constant_to_optimal_zero(y_true) + loss_identity = half_tweedie_identity.loss( + y_true=y_true, raw_prediction=y_pred + ) + half_tweedie_identity.constant_to_optimal_zero(y_true) + # Note that HalfTweedieLoss ignores different constant terms than + # HalfTweedieLossIdentity. Constant terms means terms not depending on + # raw_prediction. By adding these terms, `constant_to_optimal_zero`, both losses + # give the same values. + assert_allclose(loss_log, loss_identity) + + # For gradients and hessians, the constant terms do not matter. We have, however, + # to account for the chain rule, i.e. with x=raw_prediction + # gradient_log(x) = d/dx loss_log(x) + # = d/dx loss_identity(exp(x)) + # = exp(x) * gradient_identity(exp(x)) + # Similarly, + # hessian_log(x) = exp(x) * gradient_identity(exp(x)) + # + exp(x)**2 * hessian_identity(x) + gradient_log, hessian_log = half_tweedie_log.gradient_hessian( + y_true=y_true, raw_prediction=raw_prediction + ) + gradient_identity, hessian_identity = half_tweedie_identity.gradient_hessian( + y_true=y_true, raw_prediction=y_pred + ) + assert_allclose(gradient_log, y_pred * gradient_identity) + assert_allclose( + hessian_log, y_pred * gradient_identity + y_pred**2 * hessian_identity + ) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/compose/__init__.py b/env-llmeval/lib/python3.10/site-packages/sklearn/compose/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..7b137cdf9e07f2f275c5c78c7ab6ab289c23413f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/compose/__init__.py @@ -0,0 +1,20 @@ +"""Meta-estimators for building composite models with transformers + +In addition to its current contents, this module will eventually be home to +refurbished versions of Pipeline and FeatureUnion. 
+ +""" + +from ._column_transformer import ( + ColumnTransformer, + make_column_selector, + make_column_transformer, +) +from ._target import TransformedTargetRegressor + +__all__ = [ + "ColumnTransformer", + "make_column_transformer", + "TransformedTargetRegressor", + "make_column_selector", +] diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/compose/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/compose/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9d2242665d73b769f28a7a7458ea37f4fe267ff8 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/compose/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/compose/__pycache__/_column_transformer.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/compose/__pycache__/_column_transformer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ed6ed12ae7578816da67879c51d642813ec56676 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/compose/__pycache__/_column_transformer.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/compose/__pycache__/_target.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/compose/__pycache__/_target.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..85e7d7e0695a138d6cfa8e7ffcc13a12ce03a753 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/compose/__pycache__/_target.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/compose/_column_transformer.py b/env-llmeval/lib/python3.10/site-packages/sklearn/compose/_column_transformer.py new file mode 100644 index 0000000000000000000000000000000000000000..78b66df28c94c4d6c147d40b918824f02c317345 --- /dev/null +++ 
b/env-llmeval/lib/python3.10/site-packages/sklearn/compose/_column_transformer.py @@ -0,0 +1,1463 @@ +""" +The :mod:`sklearn.compose._column_transformer` module implements utilities +to work with heterogeneous data and to apply different transformers to +different columns. +""" + +# Author: Andreas Mueller +# Joris Van den Bossche +# License: BSD +import warnings +from collections import Counter +from itertools import chain +from numbers import Integral, Real + +import numpy as np +from scipy import sparse + +from ..base import TransformerMixin, _fit_context, clone +from ..pipeline import _fit_transform_one, _name_estimators, _transform_one +from ..preprocessing import FunctionTransformer +from ..utils import Bunch, _get_column_indices, _safe_indexing +from ..utils._estimator_html_repr import _VisualBlock +from ..utils._metadata_requests import METHODS +from ..utils._param_validation import HasMethods, Hidden, Interval, StrOptions +from ..utils._set_output import ( + _get_container_adapter, + _get_output_config, + _safe_set_output, +) +from ..utils.metadata_routing import ( + MetadataRouter, + MethodMapping, + _raise_for_params, + _routing_enabled, + process_routing, +) +from ..utils.metaestimators import _BaseComposition +from ..utils.parallel import Parallel, delayed +from ..utils.validation import ( + _check_feature_names_in, + _get_feature_names, + _is_pandas_df, + _num_samples, + check_array, + check_is_fitted, +) + +__all__ = ["ColumnTransformer", "make_column_transformer", "make_column_selector"] + + +_ERR_MSG_1DCOLUMN = ( + "1D data passed to a transformer that expects 2D data. " + "Try to specify the column selection as a list of one " + "item instead of a scalar." +) + + +class ColumnTransformer(TransformerMixin, _BaseComposition): + """Applies transformers to columns of an array or pandas DataFrame. 
+ + This estimator allows different columns or column subsets of the input + to be transformed separately and the features generated by each transformer + will be concatenated to form a single feature space. + This is useful for heterogeneous or columnar data, to combine several + feature extraction mechanisms or transformations into a single transformer. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.20 + + Parameters + ---------- + transformers : list of tuples + List of (name, transformer, columns) tuples specifying the + transformer objects to be applied to subsets of the data. + + name : str + Like in Pipeline and FeatureUnion, this allows the transformer and + its parameters to be set using ``set_params`` and searched in grid + search. + transformer : {'drop', 'passthrough'} or estimator + Estimator must support :term:`fit` and :term:`transform`. + Special-cased strings 'drop' and 'passthrough' are accepted as + well, to indicate to drop the columns or to pass them through + untransformed, respectively. + columns : str, array-like of str, int, array-like of int, \ + array-like of bool, slice or callable + Indexes the data on its second axis. Integers are interpreted as + positional columns, while strings can reference DataFrame columns + by name. A scalar string or int should be used where + ``transformer`` expects X to be a 1d array-like (vector), + otherwise a 2d array will be passed to the transformer. + A callable is passed the input data `X` and can return any of the + above. To select multiple columns by name or dtype, you can use + :obj:`make_column_selector`. + + remainder : {'drop', 'passthrough'} or estimator, default='drop' + By default, only the specified columns in `transformers` are + transformed and combined in the output, and the non-specified + columns are dropped. (default of ``'drop'``). 
+ By specifying ``remainder='passthrough'``, all remaining columns that + were not specified in `transformers`, but present in the data passed + to `fit` will be automatically passed through. This subset of columns + is concatenated with the output of the transformers. For dataframes, + extra columns not seen during `fit` will be excluded from the output + of `transform`. + By setting ``remainder`` to be an estimator, the remaining + non-specified columns will use the ``remainder`` estimator. The + estimator must support :term:`fit` and :term:`transform`. + Note that using this feature requires that the DataFrame columns + input at :term:`fit` and :term:`transform` have identical order. + + sparse_threshold : float, default=0.3 + If the output of the different transformers contains sparse matrices, + these will be stacked as a sparse matrix if the overall density is + lower than this value. Use ``sparse_threshold=0`` to always return + dense. When the transformed output consists of all dense data, the + stacked result will be dense, and this keyword will be ignored. + + n_jobs : int, default=None + Number of jobs to run in parallel. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + transformer_weights : dict, default=None + Multiplicative weights for features per transformer. The output of the + transformer is multiplied by these weights. Keys are transformer names, + values the weights. + + verbose : bool, default=False + If True, the time elapsed while fitting each transformer will be + printed as it is completed. + + verbose_feature_names_out : bool, default=True + If True, :meth:`ColumnTransformer.get_feature_names_out` will prefix + all feature names with the name of the transformer that generated that + feature. + If False, :meth:`ColumnTransformer.get_feature_names_out` will not + prefix any feature names and will error if feature names are not + unique. 
+ + .. versionadded:: 1.0 + + Attributes + ---------- + transformers_ : list + The collection of fitted transformers as tuples of (name, + fitted_transformer, column). `fitted_transformer` can be an estimator, + or `'drop'`; `'passthrough'` is replaced with an equivalent + :class:`~sklearn.preprocessing.FunctionTransformer`. In case there were + no columns selected, this will be the unfitted transformer. If there + are remaining columns, the final element is a tuple of the form: + ('remainder', transformer, remaining_columns) corresponding to the + ``remainder`` parameter. If there are remaining columns, then + ``len(transformers_)==len(transformers)+1``, otherwise + ``len(transformers_)==len(transformers)``. + + named_transformers_ : :class:`~sklearn.utils.Bunch` + Read-only attribute to access any transformer by given name. + Keys are transformer names and values are the fitted transformer + objects. + + sparse_output_ : bool + Boolean flag indicating whether the output of ``transform`` is a + sparse matrix or a dense numpy array, which depends on the output + of the individual transformers and the `sparse_threshold` keyword. + + output_indices_ : dict + A dictionary from each transformer name to a slice, where the slice + corresponds to indices in the transformed output. This is useful to + inspect which transformer is responsible for which transformed + feature(s). + + .. versionadded:: 1.0 + + n_features_in_ : int + Number of features seen during :term:`fit`. Only defined if the + underlying transformers expose such an attribute when fit. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + make_column_transformer : Convenience function for + combining the outputs of multiple transformer objects applied to + column subsets of the original feature space. 
+ make_column_selector : Convenience function for selecting + columns based on datatype or the columns name with a regex pattern. + + Notes + ----- + The order of the columns in the transformed feature matrix follows the + order of how the columns are specified in the `transformers` list. + Columns of the original feature matrix that are not specified are + dropped from the resulting transformed feature matrix, unless specified + in the `passthrough` keyword. Those columns specified with `passthrough` + are added at the right to the output of the transformers. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.compose import ColumnTransformer + >>> from sklearn.preprocessing import Normalizer + >>> ct = ColumnTransformer( + ... [("norm1", Normalizer(norm='l1'), [0, 1]), + ... ("norm2", Normalizer(norm='l1'), slice(2, 4))]) + >>> X = np.array([[0., 1., 2., 2.], + ... [1., 1., 0., 1.]]) + >>> # Normalizer scales each row of X to unit norm. A separate scaling + >>> # is applied for the two first and two last elements of each + >>> # row independently. + >>> ct.fit_transform(X) + array([[0. , 1. , 0.5, 0.5], + [0.5, 0.5, 0. , 1. ]]) + + :class:`ColumnTransformer` can be configured with a transformer that requires + a 1d array by setting the column to a string: + + >>> from sklearn.feature_extraction.text import CountVectorizer + >>> from sklearn.preprocessing import MinMaxScaler + >>> import pandas as pd # doctest: +SKIP + >>> X = pd.DataFrame({ + ... "documents": ["First item", "second one here", "Is this the last?"], + ... "width": [3, 4, 5], + ... }) # doctest: +SKIP + >>> # "documents" is a string which configures ColumnTransformer to + >>> # pass the documents column as a 1d array to the CountVectorizer + >>> ct = ColumnTransformer( + ... [("text_preprocess", CountVectorizer(), "documents"), + ... 
("num_preprocess", MinMaxScaler(), ["width"])]) + >>> X_trans = ct.fit_transform(X) # doctest: +SKIP + + For a more detailed example of usage, see + :ref:`sphx_glr_auto_examples_compose_plot_column_transformer_mixed_types.py`. + """ + + _required_parameters = ["transformers"] + + _parameter_constraints: dict = { + "transformers": [list, Hidden(tuple)], + "remainder": [ + StrOptions({"drop", "passthrough"}), + HasMethods(["fit", "transform"]), + HasMethods(["fit_transform", "transform"]), + ], + "sparse_threshold": [Interval(Real, 0, 1, closed="both")], + "n_jobs": [Integral, None], + "transformer_weights": [dict, None], + "verbose": ["verbose"], + "verbose_feature_names_out": ["boolean"], + } + + def __init__( + self, + transformers, + *, + remainder="drop", + sparse_threshold=0.3, + n_jobs=None, + transformer_weights=None, + verbose=False, + verbose_feature_names_out=True, + ): + self.transformers = transformers + self.remainder = remainder + self.sparse_threshold = sparse_threshold + self.n_jobs = n_jobs + self.transformer_weights = transformer_weights + self.verbose = verbose + self.verbose_feature_names_out = verbose_feature_names_out + + @property + def _transformers(self): + """ + Internal list of transformer only containing the name and + transformers, dropping the columns. + + DO NOT USE: This is for the implementation of get_params via + BaseComposition._get_params which expects lists of tuples of len 2. + + To iterate through the transformers, use ``self._iter`` instead. + """ + try: + return [(name, trans) for name, trans, _ in self.transformers] + except (TypeError, ValueError): + return self.transformers + + @_transformers.setter + def _transformers(self, value): + """DO NOT USE: This is for the implementation of set_params via + BaseComposition._get_params which gives lists of tuples of len 2. 
+ """ + try: + self.transformers = [ + (name, trans, col) + for ((name, trans), (_, _, col)) in zip(value, self.transformers) + ] + except (TypeError, ValueError): + self.transformers = value + + def set_output(self, *, transform=None): + """Set the output container when `"transform"` and `"fit_transform"` are called. + + Calling `set_output` will set the output of all estimators in `transformers` + and `transformers_`. + + Parameters + ---------- + transform : {"default", "pandas"}, default=None + Configure output of `transform` and `fit_transform`. + + - `"default"`: Default output format of a transformer + - `"pandas"`: DataFrame output + - `"polars"`: Polars output + - `None`: Transform configuration is unchanged + + .. versionadded:: 1.4 + `"polars"` option was added. + + Returns + ------- + self : estimator instance + Estimator instance. + """ + super().set_output(transform=transform) + + transformers = ( + trans + for _, trans, _ in chain( + self.transformers, getattr(self, "transformers_", []) + ) + if trans not in {"passthrough", "drop"} + ) + for trans in transformers: + _safe_set_output(trans, transform=transform) + + if self.remainder not in {"passthrough", "drop"}: + _safe_set_output(self.remainder, transform=transform) + + return self + + def get_params(self, deep=True): + """Get parameters for this estimator. + + Returns the parameters given in the constructor as well as the + estimators contained within the `transformers` of the + `ColumnTransformer`. + + Parameters + ---------- + deep : bool, default=True + If True, will return the parameters for this estimator and + contained subobjects that are estimators. + + Returns + ------- + params : dict + Parameter names mapped to their values. + """ + return self._get_params("_transformers", deep=deep) + + def set_params(self, **kwargs): + """Set the parameters of this estimator. + + Valid parameter keys can be listed with ``get_params()``. 
Note that you + can directly set the parameters of the estimators contained in + `transformers` of `ColumnTransformer`. + + Parameters + ---------- + **kwargs : dict + Estimator parameters. + + Returns + ------- + self : ColumnTransformer + This estimator. + """ + self._set_params("_transformers", **kwargs) + return self + + def _iter(self, fitted, column_as_labels, skip_drop, skip_empty_columns): + """ + Generate (name, trans, column, weight) tuples. + + + Parameters + ---------- + fitted : bool + If True, use the fitted transformers (``self.transformers_``) to + iterate through transformers, else use the transformers passed by + the user (``self.transformers``). + + column_as_labels : bool + If True, columns are returned as string labels. If False, columns + are returned as they were given by the user. This can only be True + if the ``ColumnTransformer`` is already fitted. + + skip_drop : bool + If True, 'drop' transformers are filtered out. + + skip_empty_columns : bool + If True, transformers with empty selected columns are filtered out. 
+ + Yields + ------ + A generator of tuples containing: + - name : the name of the transformer + - transformer : the transformer object + - columns : the columns for that transformer + - weight : the weight of the transformer + """ + if fitted: + transformers = self.transformers_ + else: + # interleave the validated column specifiers + transformers = [ + (name, trans, column) + for (name, trans, _), column in zip(self.transformers, self._columns) + ] + # add transformer tuple for remainder + if self._remainder[2]: + transformers = chain(transformers, [self._remainder]) + get_weight = (self.transformer_weights or {}).get + + for name, trans, columns in transformers: + if skip_drop and trans == "drop": + continue + if skip_empty_columns and _is_empty_column_selection(columns): + continue + + if column_as_labels: + # Convert all columns to using their string labels + columns_is_scalar = np.isscalar(columns) + + indices = self._transformer_to_input_indices[name] + columns = self.feature_names_in_[indices] + + if columns_is_scalar: + # selection is done with one dimension + columns = columns[0] + + yield (name, trans, columns, get_weight(name)) + + def _validate_transformers(self): + """Validate names of transformers and the transformers themselves. + + This checks whether given transformers have the required methods, i.e. + `fit` or `fit_transform` and `transform` implemented. + """ + if not self.transformers: + return + + names, transformers, _ = zip(*self.transformers) + + # validate names + self._validate_names(names) + + # validate estimators + for t in transformers: + if t in ("drop", "passthrough"): + continue + if not (hasattr(t, "fit") or hasattr(t, "fit_transform")) or not hasattr( + t, "transform" + ): + # Used to validate the transformers in the `transformers` list + raise TypeError( + "All estimators should implement fit and " + "transform, or can be 'drop' or 'passthrough' " + "specifiers. '%s' (type %s) doesn't." 
% (t, type(t)) + ) + + def _validate_column_callables(self, X): + """ + Converts callable column specifications. + + This stores a dictionary of the form `{step_name: column_indices}` and + calls the `columns` on `X` if `columns` is a callable for a given + transformer. + + The results are then stored in `self._transformer_to_input_indices`. + """ + all_columns = [] + transformer_to_input_indices = {} + for name, _, columns in self.transformers: + if callable(columns): + columns = columns(X) + all_columns.append(columns) + transformer_to_input_indices[name] = _get_column_indices(X, columns) + + self._columns = all_columns + self._transformer_to_input_indices = transformer_to_input_indices + + def _validate_remainder(self, X): + """ + Validates ``remainder`` and defines ``_remainder`` targeting + the remaining columns. + """ + cols = set(chain(*self._transformer_to_input_indices.values())) + remaining = sorted(set(range(self.n_features_in_)) - cols) + self._remainder = ("remainder", self.remainder, remaining) + self._transformer_to_input_indices["remainder"] = remaining + + @property + def named_transformers_(self): + """Access the fitted transformer by name. + + Read-only attribute to access any transformer by given name. + Keys are transformer names and values are the fitted transformer + objects. + """ + # Use Bunch object to improve autocomplete + return Bunch(**{name: trans for name, trans, _ in self.transformers_}) + + def _get_feature_name_out_for_transformer(self, name, trans, feature_names_in): + """Gets feature names of transformer. + + Used in conjunction with self._iter(fitted=True) in get_feature_names_out. + """ + column_indices = self._transformer_to_input_indices[name] + names = feature_names_in[column_indices] + # An actual transformer + if not hasattr(trans, "get_feature_names_out"): + raise AttributeError( + f"Transformer {name} (type {type(trans).__name__}) does " + "not provide get_feature_names_out." 
+ ) + return trans.get_feature_names_out(names) + + def get_feature_names_out(self, input_features=None): + """Get output feature names for transformation. + + Parameters + ---------- + input_features : array-like of str or None, default=None + Input features. + + - If `input_features` is `None`, then `feature_names_in_` is + used as feature names in. If `feature_names_in_` is not defined, + then the following input feature names are generated: + `["x0", "x1", ..., "x(n_features_in_ - 1)"]`. + - If `input_features` is an array-like, then `input_features` must + match `feature_names_in_` if `feature_names_in_` is defined. + + Returns + ------- + feature_names_out : ndarray of str objects + Transformed feature names. + """ + check_is_fitted(self) + input_features = _check_feature_names_in(self, input_features) + + # List of tuples (name, feature_names_out) + transformer_with_feature_names_out = [] + for name, trans, *_ in self._iter( + fitted=True, + column_as_labels=False, + skip_empty_columns=True, + skip_drop=True, + ): + feature_names_out = self._get_feature_name_out_for_transformer( + name, trans, input_features + ) + if feature_names_out is None: + continue + transformer_with_feature_names_out.append((name, feature_names_out)) + + if not transformer_with_feature_names_out: + # No feature names + return np.array([], dtype=object) + + return self._add_prefix_for_feature_names_out( + transformer_with_feature_names_out + ) + + def _add_prefix_for_feature_names_out(self, transformer_with_feature_names_out): + """Add prefix for feature names out that includes the transformer names. + + Parameters + ---------- + transformer_with_feature_names_out : list of tuples of (str, array-like of str) + The tuple consistent of the transformer's name and its feature names out. + + Returns + ------- + feature_names_out : ndarray of shape (n_features,), dtype=str + Transformed feature names. 
+ """ + if self.verbose_feature_names_out: + # Prefix the feature names out with the transformers name + names = list( + chain.from_iterable( + (f"{name}__{i}" for i in feature_names_out) + for name, feature_names_out in transformer_with_feature_names_out + ) + ) + return np.asarray(names, dtype=object) + + # verbose_feature_names_out is False + # Check that names are all unique without a prefix + feature_names_count = Counter( + chain.from_iterable(s for _, s in transformer_with_feature_names_out) + ) + top_6_overlap = [ + name for name, count in feature_names_count.most_common(6) if count > 1 + ] + top_6_overlap.sort() + if top_6_overlap: + if len(top_6_overlap) == 6: + # There are more than 5 overlapping names, we only show the 5 + # of the feature names + names_repr = str(top_6_overlap[:5])[:-1] + ", ...]" + else: + names_repr = str(top_6_overlap) + raise ValueError( + f"Output feature names: {names_repr} are not unique. Please set " + "verbose_feature_names_out=True to add prefixes to feature names" + ) + + return np.concatenate( + [name for _, name in transformer_with_feature_names_out], + ) + + def _update_fitted_transformers(self, transformers): + """Set self.transformers_ from given transformers. + + Parameters + ---------- + transformers : list of estimators + The fitted estimators as the output of + `self._call_func_on_transformers(func=_fit_transform_one, ...)`. + That function doesn't include 'drop' or transformers for which no + column is selected. 'drop' is kept as is, and for the no-column + transformers the unfitted transformer is put in + `self.transformers_`. 
+ """ + # transformers are fitted; excludes 'drop' cases + fitted_transformers = iter(transformers) + transformers_ = [] + + for name, old, column, _ in self._iter( + fitted=False, + column_as_labels=False, + skip_drop=False, + skip_empty_columns=False, + ): + if old == "drop": + trans = "drop" + elif _is_empty_column_selection(column): + trans = old + else: + trans = next(fitted_transformers) + transformers_.append((name, trans, column)) + + # sanity check that transformers is exhausted + assert not list(fitted_transformers) + self.transformers_ = transformers_ + + def _validate_output(self, result): + """ + Ensure that the output of each transformer is 2D. Otherwise + hstack can raise an error or produce incorrect results. + """ + names = [ + name + for name, _, _, _ in self._iter( + fitted=True, + column_as_labels=False, + skip_drop=True, + skip_empty_columns=True, + ) + ] + for Xs, name in zip(result, names): + if not getattr(Xs, "ndim", 0) == 2 and not hasattr(Xs, "__dataframe__"): + raise ValueError( + "The output of the '{0}' transformer should be 2D (numpy array, " + "scipy sparse array, dataframe).".format(name) + ) + if _get_output_config("transform", self)["dense"] == "pandas": + return + try: + import pandas as pd + except ImportError: + return + for Xs, name in zip(result, names): + if not _is_pandas_df(Xs): + continue + for col_name, dtype in Xs.dtypes.to_dict().items(): + if getattr(dtype, "na_value", None) is not pd.NA: + continue + if pd.NA not in Xs[col_name].values: + continue + class_name = self.__class__.__name__ + # TODO(1.6): replace warning with ValueError + warnings.warn( + ( + f"The output of the '{name}' transformer for column" + f" '{col_name}' has dtype {dtype} and uses pandas.NA to" + " represent null values. Storing this output in a numpy array" + " can cause errors in downstream scikit-learn estimators, and" + " inefficiencies. Starting with scikit-learn version 1.6, this" + " will raise a ValueError. 
To avoid this problem you can (i)" + " store the output in a pandas DataFrame by using" + f" {class_name}.set_output(transform='pandas') or (ii) modify" + f" the input data or the '{name}' transformer to avoid the" + " presence of pandas.NA (for example by using" + " pandas.DataFrame.astype)." + ), + FutureWarning, + ) + + def _record_output_indices(self, Xs): + """ + Record which transformer produced which column. + """ + idx = 0 + self.output_indices_ = {} + + for transformer_idx, (name, _, _, _) in enumerate( + self._iter( + fitted=True, + column_as_labels=False, + skip_drop=True, + skip_empty_columns=True, + ) + ): + n_columns = Xs[transformer_idx].shape[1] + self.output_indices_[name] = slice(idx, idx + n_columns) + idx += n_columns + + # `_iter` only generates transformers that have a non empty + # selection. Here we set empty slices for transformers that + # generate no output, which are safe for indexing + all_names = [t[0] for t in self.transformers] + ["remainder"] + for name in all_names: + if name not in self.output_indices_: + self.output_indices_[name] = slice(0, 0) + + def _log_message(self, name, idx, total): + if not self.verbose: + return None + return "(%d of %d) Processing %s" % (idx, total, name) + + def _call_func_on_transformers(self, X, y, func, column_as_labels, routed_params): + """ + Private function to fit and/or transform on demand. + + Parameters + ---------- + X : {array-like, dataframe} of shape (n_samples, n_features) + The data to be used in fit and/or transform. + + y : array-like of shape (n_samples,) + Targets. + + func : callable + Function to call, which can be _fit_transform_one or + _transform_one. + + column_as_labels : bool + Used to iterate through transformers. If True, columns are returned + as strings. If False, columns are returned as they were given by + the user. Can be True only if the ``ColumnTransformer`` is already + fitted. + + routed_params : dict + The routed parameters as the output from ``process_routing``. 
+ + Returns + ------- + Return value (transformers and/or transformed X data) depends + on the passed function. + """ + if func is _fit_transform_one: + fitted = False + else: # func is _transform_one + fitted = True + + transformers = list( + self._iter( + fitted=fitted, + column_as_labels=column_as_labels, + skip_drop=True, + skip_empty_columns=True, + ) + ) + try: + jobs = [] + for idx, (name, trans, column, weight) in enumerate(transformers, start=1): + if func is _fit_transform_one: + if trans == "passthrough": + output_config = _get_output_config("transform", self) + trans = FunctionTransformer( + accept_sparse=True, + check_inverse=False, + feature_names_out="one-to-one", + ).set_output(transform=output_config["dense"]) + + extra_args = dict( + message_clsname="ColumnTransformer", + message=self._log_message(name, idx, len(transformers)), + ) + else: # func is _transform_one + extra_args = {} + jobs.append( + delayed(func)( + transformer=clone(trans) if not fitted else trans, + X=_safe_indexing(X, column, axis=1), + y=y, + weight=weight, + **extra_args, + params=routed_params[name], + ) + ) + + return Parallel(n_jobs=self.n_jobs)(jobs) + + except ValueError as e: + if "Expected 2D array, got 1D array instead" in str(e): + raise ValueError(_ERR_MSG_1DCOLUMN) from e + else: + raise + + def fit(self, X, y=None, **params): + """Fit all transformers using X. + + Parameters + ---------- + X : {array-like, dataframe} of shape (n_samples, n_features) + Input data, of which specified subsets are used to fit the + transformers. + + y : array-like of shape (n_samples,...), default=None + Targets for supervised learning. + + **params : dict, default=None + Parameters to be passed to the underlying transformers' ``fit`` and + ``transform`` methods. + + You can only pass this if metadata routing is enabled, which you + can enable using ``sklearn.set_config(enable_metadata_routing=True)``. + + .. 
versionadded:: 1.4 + + Returns + ------- + self : ColumnTransformer + This estimator. + """ + _raise_for_params(params, self, "fit") + # we use fit_transform to make sure to set sparse_output_ (for which we + # need the transformed data) to have consistent output type in predict + self.fit_transform(X, y=y, **params) + return self + + @_fit_context( + # estimators in ColumnTransformer.transformers are not validated yet + prefer_skip_nested_validation=False + ) + def fit_transform(self, X, y=None, **params): + """Fit all transformers, transform the data and concatenate results. + + Parameters + ---------- + X : {array-like, dataframe} of shape (n_samples, n_features) + Input data, of which specified subsets are used to fit the + transformers. + + y : array-like of shape (n_samples,), default=None + Targets for supervised learning. + + **params : dict, default=None + Parameters to be passed to the underlying transformers' ``fit`` and + ``transform`` methods. + + You can only pass this if metadata routing is enabled, which you + can enable using ``sklearn.set_config(enable_metadata_routing=True)``. + + .. versionadded:: 1.4 + + Returns + ------- + X_t : {array-like, sparse matrix} of \ + shape (n_samples, sum_n_components) + Horizontally stacked results of transformers. sum_n_components is the + sum of n_components (output dimension) over transformers. If + any result is a sparse matrix, everything will be converted to + sparse matrices. 
+ """ + _raise_for_params(params, self, "fit_transform") + self._check_feature_names(X, reset=True) + + X = _check_X(X) + # set n_features_in_ attribute + self._check_n_features(X, reset=True) + self._validate_transformers() + n_samples = _num_samples(X) + + self._validate_column_callables(X) + self._validate_remainder(X) + + if _routing_enabled(): + routed_params = process_routing(self, "fit_transform", **params) + else: + routed_params = self._get_empty_routing() + + result = self._call_func_on_transformers( + X, + y, + _fit_transform_one, + column_as_labels=False, + routed_params=routed_params, + ) + + if not result: + self._update_fitted_transformers([]) + # All transformers are None + return np.zeros((n_samples, 0)) + + Xs, transformers = zip(*result) + + # determine if concatenated output will be sparse or not + if any(sparse.issparse(X) for X in Xs): + nnz = sum(X.nnz if sparse.issparse(X) else X.size for X in Xs) + total = sum( + X.shape[0] * X.shape[1] if sparse.issparse(X) else X.size for X in Xs + ) + density = nnz / total + self.sparse_output_ = density < self.sparse_threshold + else: + self.sparse_output_ = False + + self._update_fitted_transformers(transformers) + self._validate_output(Xs) + self._record_output_indices(Xs) + + return self._hstack(list(Xs), n_samples=n_samples) + + def transform(self, X, **params): + """Transform X separately by each transformer, concatenate results. + + Parameters + ---------- + X : {array-like, dataframe} of shape (n_samples, n_features) + The data to be transformed by subset. + + **params : dict, default=None + Parameters to be passed to the underlying transformers' ``transform`` + method. + + You can only pass this if metadata routing is enabled, which you + can enable using ``sklearn.set_config(enable_metadata_routing=True)``. + + .. versionadded:: 1.4 + + Returns + ------- + X_t : {array-like, sparse matrix} of \ + shape (n_samples, sum_n_components) + Horizontally stacked results of transformers. 
sum_n_components is the + sum of n_components (output dimension) over transformers. If + any result is a sparse matrix, everything will be converted to + sparse matrices. + """ + _raise_for_params(params, self, "transform") + check_is_fitted(self) + X = _check_X(X) + + # If ColumnTransformer is fit using a dataframe, and now a dataframe is + # passed to be transformed, we select columns by name instead. This + # enables the user to pass X at transform time with extra columns which + # were not present in fit time, and the order of the columns doesn't + # matter. + fit_dataframe_and_transform_dataframe = hasattr(self, "feature_names_in_") and ( + _is_pandas_df(X) or hasattr(X, "__dataframe__") + ) + + n_samples = _num_samples(X) + column_names = _get_feature_names(X) + + if fit_dataframe_and_transform_dataframe: + named_transformers = self.named_transformers_ + # check that all names seen in fit are in transform, unless + # they were dropped + non_dropped_indices = [ + ind + for name, ind in self._transformer_to_input_indices.items() + if name in named_transformers and named_transformers[name] != "drop" + ] + + all_indices = set(chain(*non_dropped_indices)) + all_names = set(self.feature_names_in_[ind] for ind in all_indices) + + diff = all_names - set(column_names) + if diff: + raise ValueError(f"columns are missing: {diff}") + else: + # ndarray was used for fitting or transforming, thus we only + # check that n_features_in_ is consistent + self._check_n_features(X, reset=False) + + if _routing_enabled(): + routed_params = process_routing(self, "transform", **params) + else: + routed_params = self._get_empty_routing() + + Xs = self._call_func_on_transformers( + X, + None, + _transform_one, + column_as_labels=fit_dataframe_and_transform_dataframe, + routed_params=routed_params, + ) + self._validate_output(Xs) + + if not Xs: + # All transformers are None + return np.zeros((n_samples, 0)) + + return self._hstack(list(Xs), n_samples=n_samples) + + def _hstack(self, Xs, 
*, n_samples): + """Stacks Xs horizontally. + + This allows subclasses to control the stacking behavior, while reusing + everything else from ColumnTransformer. + + Parameters + ---------- + Xs : list of {array-like, sparse matrix, dataframe} + The container to concatenate. + n_samples : int + The number of samples in the input data to checking the transformation + consistency. + """ + if self.sparse_output_: + try: + # since all columns should be numeric before stacking them + # in a sparse matrix, `check_array` is used for the + # dtype conversion if necessary. + converted_Xs = [ + check_array(X, accept_sparse=True, force_all_finite=False) + for X in Xs + ] + except ValueError as e: + raise ValueError( + "For a sparse output, all columns should " + "be a numeric or convertible to a numeric." + ) from e + + return sparse.hstack(converted_Xs).tocsr() + else: + Xs = [f.toarray() if sparse.issparse(f) else f for f in Xs] + adapter = _get_container_adapter("transform", self) + if adapter and all(adapter.is_supported_container(X) for X in Xs): + # rename before stacking as it avoids to error on temporary duplicated + # columns + transformer_names = [ + t[0] + for t in self._iter( + fitted=True, + column_as_labels=False, + skip_drop=True, + skip_empty_columns=True, + ) + ] + feature_names_outs = [X.columns for X in Xs if X.shape[1] != 0] + if self.verbose_feature_names_out: + # `_add_prefix_for_feature_names_out` takes care about raising + # an error if there are duplicated columns. 
+ feature_names_outs = self._add_prefix_for_feature_names_out( + list(zip(transformer_names, feature_names_outs)) + ) + else: + # check for duplicated columns and raise if any + feature_names_outs = list(chain.from_iterable(feature_names_outs)) + feature_names_count = Counter(feature_names_outs) + if any(count > 1 for count in feature_names_count.values()): + duplicated_feature_names = sorted( + name + for name, count in feature_names_count.items() + if count > 1 + ) + err_msg = ( + "Duplicated feature names found before concatenating the" + " outputs of the transformers:" + f" {duplicated_feature_names}.\n" + ) + for transformer_name, X in zip(transformer_names, Xs): + if X.shape[1] == 0: + continue + dup_cols_in_transformer = sorted( + set(X.columns).intersection(duplicated_feature_names) + ) + if len(dup_cols_in_transformer): + err_msg += ( + f"Transformer {transformer_name} has conflicting " + f"columns names: {dup_cols_in_transformer}.\n" + ) + raise ValueError( + err_msg + + "Either make sure that the transformers named above " + "do not generate columns with conflicting names or set " + "verbose_feature_names_out=True to automatically " + "prefix to the output feature names with the name " + "of the transformer to prevent any conflicting " + "names." + ) + + names_idx = 0 + for X in Xs: + if X.shape[1] == 0: + continue + names_out = feature_names_outs[names_idx : names_idx + X.shape[1]] + adapter.rename_columns(X, names_out) + names_idx += X.shape[1] + + output = adapter.hstack(Xs) + output_samples = output.shape[0] + if output_samples != n_samples: + raise ValueError( + "Concatenating DataFrames from the transformer's output lead to" + " an inconsistent number of samples. The output may have Pandas" + " Indexes that do not match, or that transformers are returning" + " number of samples which are not the same as the number input" + " samples." 
+ ) + + return output + + return np.hstack(Xs) + + def _sk_visual_block_(self): + if isinstance(self.remainder, str) and self.remainder == "drop": + transformers = self.transformers + elif hasattr(self, "_remainder"): + remainder_columns = self._remainder[2] + if ( + hasattr(self, "feature_names_in_") + and remainder_columns + and not all(isinstance(col, str) for col in remainder_columns) + ): + remainder_columns = self.feature_names_in_[remainder_columns].tolist() + transformers = chain( + self.transformers, [("remainder", self.remainder, remainder_columns)] + ) + else: + transformers = chain(self.transformers, [("remainder", self.remainder, "")]) + + names, transformers, name_details = zip(*transformers) + return _VisualBlock( + "parallel", transformers, names=names, name_details=name_details + ) + + def _get_empty_routing(self): + """Return empty routing. + + Used while routing can be disabled. + + TODO: Remove when ``set_config(enable_metadata_routing=False)`` is no + more an option. + """ + return Bunch( + **{ + name: Bunch(**{method: {} for method in METHODS}) + for name, step, _, _ in self._iter( + fitted=False, + column_as_labels=False, + skip_drop=True, + skip_empty_columns=True, + ) + } + ) + + def get_metadata_routing(self): + """Get metadata routing of this object. + + Please check :ref:`User Guide ` on how the routing + mechanism works. + + .. versionadded:: 1.4 + + Returns + ------- + routing : MetadataRouter + A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating + routing information. + """ + router = MetadataRouter(owner=self.__class__.__name__) + # Here we don't care about which columns are used for which + # transformers, and whether or not a transformer is used at all, which + # might happen if no columns are selected for that transformer. We + # request all metadata requested by all transformers. 
+ transformers = chain(self.transformers, [("remainder", self.remainder, None)]) + for name, step, _ in transformers: + method_mapping = MethodMapping() + if hasattr(step, "fit_transform"): + ( + method_mapping.add(caller="fit", callee="fit_transform").add( + caller="fit_transform", callee="fit_transform" + ) + ) + else: + ( + method_mapping.add(caller="fit", callee="fit") + .add(caller="fit", callee="transform") + .add(caller="fit_transform", callee="fit") + .add(caller="fit_transform", callee="transform") + ) + method_mapping.add(caller="transform", callee="transform") + router.add(method_mapping=method_mapping, **{name: step}) + + return router + + +def _check_X(X): + """Use check_array only when necessary, e.g. on lists and other non-array-likes.""" + if hasattr(X, "__array__") or hasattr(X, "__dataframe__") or sparse.issparse(X): + return X + return check_array(X, force_all_finite="allow-nan", dtype=object) + + +def _is_empty_column_selection(column): + """ + Return True if the column selection is empty (empty list or all-False + boolean array). + + """ + if hasattr(column, "dtype") and np.issubdtype(column.dtype, np.bool_): + return not column.any() + elif hasattr(column, "__len__"): + return ( + len(column) == 0 + or all(isinstance(col, bool) for col in column) + and not any(column) + ) + else: + return False + + +def _get_transformer_list(estimators): + """ + Construct (name, trans, column) tuples from list + + """ + transformers, columns = zip(*estimators) + names, _ = zip(*_name_estimators(transformers)) + + transformer_list = list(zip(names, transformers, columns)) + return transformer_list + + +# This function is not validated using validate_params because +# it's just a factory for ColumnTransformer. +def make_column_transformer( + *transformers, + remainder="drop", + sparse_threshold=0.3, + n_jobs=None, + verbose=False, + verbose_feature_names_out=True, +): + """Construct a ColumnTransformer from the given transformers. 
+ + This is a shorthand for the ColumnTransformer constructor; it does not + require, and does not permit, naming the transformers. Instead, they will + be given names automatically based on their types. It also does not allow + weighting with ``transformer_weights``. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + *transformers : tuples + Tuples of the form (transformer, columns) specifying the + transformer objects to be applied to subsets of the data. + + transformer : {'drop', 'passthrough'} or estimator + Estimator must support :term:`fit` and :term:`transform`. + Special-cased strings 'drop' and 'passthrough' are accepted as + well, to indicate to drop the columns or to pass them through + untransformed, respectively. + columns : str, array-like of str, int, array-like of int, slice, \ + array-like of bool or callable + Indexes the data on its second axis. Integers are interpreted as + positional columns, while strings can reference DataFrame columns + by name. A scalar string or int should be used where + ``transformer`` expects X to be a 1d array-like (vector), + otherwise a 2d array will be passed to the transformer. + A callable is passed the input data `X` and can return any of the + above. To select multiple columns by name or dtype, you can use + :obj:`make_column_selector`. + + remainder : {'drop', 'passthrough'} or estimator, default='drop' + By default, only the specified columns in `transformers` are + transformed and combined in the output, and the non-specified + columns are dropped. (default of ``'drop'``). + By specifying ``remainder='passthrough'``, all remaining columns that + were not specified in `transformers` will be automatically passed + through. This subset of columns is concatenated with the output of + the transformers. + By setting ``remainder`` to be an estimator, the remaining + non-specified columns will use the ``remainder`` estimator. The + estimator must support :term:`fit` and :term:`transform`. 
+ + sparse_threshold : float, default=0.3 + If the transformed output consists of a mix of sparse and dense data, + it will be stacked as a sparse matrix if the density is lower than this + value. Use ``sparse_threshold=0`` to always return dense. + When the transformed output consists of all sparse or all dense data, + the stacked result will be sparse or dense, respectively, and this + keyword will be ignored. + + n_jobs : int, default=None + Number of jobs to run in parallel. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + verbose : bool, default=False + If True, the time elapsed while fitting each transformer will be + printed as it is completed. + + verbose_feature_names_out : bool, default=True + If True, :meth:`ColumnTransformer.get_feature_names_out` will prefix + all feature names with the name of the transformer that generated that + feature. + If False, :meth:`ColumnTransformer.get_feature_names_out` will not + prefix any feature names and will error if feature names are not + unique. + + .. versionadded:: 1.0 + + Returns + ------- + ct : ColumnTransformer + Returns a :class:`ColumnTransformer` object. + + See Also + -------- + ColumnTransformer : Class that allows combining the + outputs of multiple transformer objects used on column subsets + of the data into a single feature space. + + Examples + -------- + >>> from sklearn.preprocessing import StandardScaler, OneHotEncoder + >>> from sklearn.compose import make_column_transformer + >>> make_column_transformer( + ... (StandardScaler(), ['numerical_column']), + ... 
(OneHotEncoder(), ['categorical_column'])) + ColumnTransformer(transformers=[('standardscaler', StandardScaler(...), + ['numerical_column']), + ('onehotencoder', OneHotEncoder(...), + ['categorical_column'])]) + """ + # transformer_weights keyword is not passed through because the user + # would need to know the automatically generated names of the transformers + transformer_list = _get_transformer_list(transformers) + return ColumnTransformer( + transformer_list, + n_jobs=n_jobs, + remainder=remainder, + sparse_threshold=sparse_threshold, + verbose=verbose, + verbose_feature_names_out=verbose_feature_names_out, + ) + + +class make_column_selector: + """Create a callable to select columns to be used with + :class:`ColumnTransformer`. + + :func:`make_column_selector` can select columns based on datatype or the + columns name with a regex. When using multiple selection criteria, **all** + criteria must match for a column to be selected. + + For an example of how to use :func:`make_column_selector` within a + :class:`ColumnTransformer` to select columns based on data type (i.e. + `dtype`), refer to + :ref:`sphx_glr_auto_examples_compose_plot_column_transformer_mixed_types.py`. + + Parameters + ---------- + pattern : str, default=None + Name of columns containing this regex pattern will be included. If + None, column selection will not be selected based on pattern. + + dtype_include : column dtype or list of column dtypes, default=None + A selection of dtypes to include. For more details, see + :meth:`pandas.DataFrame.select_dtypes`. + + dtype_exclude : column dtype or list of column dtypes, default=None + A selection of dtypes to exclude. For more details, see + :meth:`pandas.DataFrame.select_dtypes`. + + Returns + ------- + selector : callable + Callable for column selection to be used by a + :class:`ColumnTransformer`. 
+ + See Also + -------- + ColumnTransformer : Class that allows combining the + outputs of multiple transformer objects used on column subsets + of the data into a single feature space. + + Examples + -------- + >>> from sklearn.preprocessing import StandardScaler, OneHotEncoder + >>> from sklearn.compose import make_column_transformer + >>> from sklearn.compose import make_column_selector + >>> import numpy as np + >>> import pandas as pd # doctest: +SKIP + >>> X = pd.DataFrame({'city': ['London', 'London', 'Paris', 'Sallisaw'], + ... 'rating': [5, 3, 4, 5]}) # doctest: +SKIP + >>> ct = make_column_transformer( + ... (StandardScaler(), + ... make_column_selector(dtype_include=np.number)), # rating + ... (OneHotEncoder(), + ... make_column_selector(dtype_include=object))) # city + >>> ct.fit_transform(X) # doctest: +SKIP + array([[ 0.90453403, 1. , 0. , 0. ], + [-1.50755672, 1. , 0. , 0. ], + [-0.30151134, 0. , 1. , 0. ], + [ 0.90453403, 0. , 0. , 1. ]]) + """ + + def __init__(self, pattern=None, *, dtype_include=None, dtype_exclude=None): + self.pattern = pattern + self.dtype_include = dtype_include + self.dtype_exclude = dtype_exclude + + def __call__(self, df): + """Callable for column selection to be used by a + :class:`ColumnTransformer`. + + Parameters + ---------- + df : dataframe of shape (n_features, n_samples) + DataFrame to select columns from. 
+ """ + if not hasattr(df, "iloc"): + raise ValueError( + "make_column_selector can only be applied to pandas dataframes" + ) + df_row = df.iloc[:1] + if self.dtype_include is not None or self.dtype_exclude is not None: + df_row = df_row.select_dtypes( + include=self.dtype_include, exclude=self.dtype_exclude + ) + cols = df_row.columns + if self.pattern is not None: + cols = cols[cols.str.contains(self.pattern, regex=True)] + return cols.tolist() diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/compose/_target.py b/env-llmeval/lib/python3.10/site-packages/sklearn/compose/_target.py new file mode 100644 index 0000000000000000000000000000000000000000..b90d235ac758bb31717b96cf82a04d92ac614599 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/compose/_target.py @@ -0,0 +1,342 @@ +# Authors: Andreas Mueller +# Guillaume Lemaitre +# License: BSD 3 clause + +import warnings + +import numpy as np + +from ..base import BaseEstimator, RegressorMixin, _fit_context, clone +from ..exceptions import NotFittedError +from ..preprocessing import FunctionTransformer +from ..utils import _safe_indexing, check_array +from ..utils._param_validation import HasMethods +from ..utils._tags import _safe_tags +from ..utils.metadata_routing import ( + _raise_for_unsupported_routing, + _RoutingNotSupportedMixin, +) +from ..utils.validation import check_is_fitted + +__all__ = ["TransformedTargetRegressor"] + + +class TransformedTargetRegressor( + _RoutingNotSupportedMixin, RegressorMixin, BaseEstimator +): + """Meta-estimator to regress on a transformed target. + + Useful for applying a non-linear transformation to the target `y` in + regression problems. This transformation can be given as a Transformer + such as the :class:`~sklearn.preprocessing.QuantileTransformer` or as a + function and its inverse such as `np.log` and `np.exp`. 
+ + The computation during :meth:`fit` is:: + + regressor.fit(X, func(y)) + + or:: + + regressor.fit(X, transformer.transform(y)) + + The computation during :meth:`predict` is:: + + inverse_func(regressor.predict(X)) + + or:: + + transformer.inverse_transform(regressor.predict(X)) + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.20 + + Parameters + ---------- + regressor : object, default=None + Regressor object such as derived from + :class:`~sklearn.base.RegressorMixin`. This regressor will + automatically be cloned each time prior to fitting. If `regressor is + None`, :class:`~sklearn.linear_model.LinearRegression` is created and used. + + transformer : object, default=None + Estimator object such as derived from + :class:`~sklearn.base.TransformerMixin`. Cannot be set at the same time + as `func` and `inverse_func`. If `transformer is None` as well as + `func` and `inverse_func`, the transformer will be an identity + transformer. Note that the transformer will be cloned during fitting. + Also, the transformer is restricting `y` to be a numpy array. + + func : function, default=None + Function to apply to `y` before passing to :meth:`fit`. Cannot be set + at the same time as `transformer`. The function needs to return a + 2-dimensional array. If `func is None`, the function used will be the + identity function. + + inverse_func : function, default=None + Function to apply to the prediction of the regressor. Cannot be set at + the same time as `transformer`. The function needs to return a + 2-dimensional array. The inverse function is used to return + predictions to the same space of the original training labels. + + check_inverse : bool, default=True + Whether to check that `transform` followed by `inverse_transform` + or `func` followed by `inverse_func` leads to the original targets. + + Attributes + ---------- + regressor_ : object + Fitted regressor. + + transformer_ : object + Transformer used in :meth:`fit` and :meth:`predict`. 
+ + n_features_in_ : int + Number of features seen during :term:`fit`. Only defined if the + underlying regressor exposes such an attribute when fit. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + sklearn.preprocessing.FunctionTransformer : Construct a transformer from an + arbitrary callable. + + Notes + ----- + Internally, the target `y` is always converted into a 2-dimensional array + to be used by scikit-learn transformers. At the time of prediction, the + output will be reshaped to a have the same number of dimensions as `y`. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.linear_model import LinearRegression + >>> from sklearn.compose import TransformedTargetRegressor + >>> tt = TransformedTargetRegressor(regressor=LinearRegression(), + ... func=np.log, inverse_func=np.exp) + >>> X = np.arange(4).reshape(-1, 1) + >>> y = np.exp(2 * X).ravel() + >>> tt.fit(X, y) + TransformedTargetRegressor(...) + >>> tt.score(X, y) + 1.0 + >>> tt.regressor_.coef_ + array([2.]) + + For a more detailed example use case refer to + :ref:`sphx_glr_auto_examples_compose_plot_transformed_target.py`. + """ + + _parameter_constraints: dict = { + "regressor": [HasMethods(["fit", "predict"]), None], + "transformer": [HasMethods("transform"), None], + "func": [callable, None], + "inverse_func": [callable, None], + "check_inverse": ["boolean"], + } + + def __init__( + self, + regressor=None, + *, + transformer=None, + func=None, + inverse_func=None, + check_inverse=True, + ): + self.regressor = regressor + self.transformer = transformer + self.func = func + self.inverse_func = inverse_func + self.check_inverse = check_inverse + + def _fit_transformer(self, y): + """Check transformer and fit transformer. 
+ + Create the default transformer, fit it and make additional inverse + check on a subset (optional). + + """ + if self.transformer is not None and ( + self.func is not None or self.inverse_func is not None + ): + raise ValueError( + "'transformer' and functions 'func'/'inverse_func' cannot both be set." + ) + elif self.transformer is not None: + self.transformer_ = clone(self.transformer) + else: + if self.func is not None and self.inverse_func is None: + raise ValueError( + "When 'func' is provided, 'inverse_func' must also be provided" + ) + self.transformer_ = FunctionTransformer( + func=self.func, + inverse_func=self.inverse_func, + validate=True, + check_inverse=self.check_inverse, + ) + # XXX: sample_weight is not currently passed to the + # transformer. However, if transformer starts using sample_weight, the + # code should be modified accordingly. At the time to consider the + # sample_prop feature, it is also a good use case to be considered. + self.transformer_.fit(y) + if self.check_inverse: + idx_selected = slice(None, None, max(1, y.shape[0] // 10)) + y_sel = _safe_indexing(y, idx_selected) + y_sel_t = self.transformer_.transform(y_sel) + if not np.allclose(y_sel, self.transformer_.inverse_transform(y_sel_t)): + warnings.warn( + ( + "The provided functions or transformer are" + " not strictly inverse of each other. If" + " you are sure you want to proceed regardless" + ", set 'check_inverse=False'" + ), + UserWarning, + ) + + @_fit_context( + # TransformedTargetRegressor.regressor/transformer are not validated yet. + prefer_skip_nested_validation=False + ) + def fit(self, X, y, **fit_params): + """Fit the model according to the given training data. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training vector, where `n_samples` is the number of samples and + `n_features` is the number of features. + + y : array-like of shape (n_samples,) + Target values. 
+ + **fit_params : dict + Parameters passed to the `fit` method of the underlying + regressor. + + Returns + ------- + self : object + Fitted estimator. + """ + _raise_for_unsupported_routing(self, "fit", **fit_params) + if y is None: + raise ValueError( + f"This {self.__class__.__name__} estimator " + "requires y to be passed, but the target y is None." + ) + y = check_array( + y, + input_name="y", + accept_sparse=False, + force_all_finite=True, + ensure_2d=False, + dtype="numeric", + allow_nd=True, + ) + + # store the number of dimension of the target to predict an array of + # similar shape at predict + self._training_dim = y.ndim + + # transformers are designed to modify X which is 2d dimensional, we + # need to modify y accordingly. + if y.ndim == 1: + y_2d = y.reshape(-1, 1) + else: + y_2d = y + self._fit_transformer(y_2d) + + # transform y and convert back to 1d array if needed + y_trans = self.transformer_.transform(y_2d) + # FIXME: a FunctionTransformer can return a 1D array even when validate + # is set to True. Therefore, we need to check the number of dimension + # first. + if y_trans.ndim == 2 and y_trans.shape[1] == 1: + y_trans = y_trans.squeeze(axis=1) + + if self.regressor is None: + from ..linear_model import LinearRegression + + self.regressor_ = LinearRegression() + else: + self.regressor_ = clone(self.regressor) + + self.regressor_.fit(X, y_trans, **fit_params) + + if hasattr(self.regressor_, "feature_names_in_"): + self.feature_names_in_ = self.regressor_.feature_names_in_ + + return self + + def predict(self, X, **predict_params): + """Predict using the base regressor, applying inverse. + + The regressor is used to predict and the `inverse_func` or + `inverse_transform` is applied before returning the prediction. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Samples. + + **predict_params : dict of str -> object + Parameters passed to the `predict` method of the underlying + regressor. 
+ + Returns + ------- + y_hat : ndarray of shape (n_samples,) + Predicted values. + """ + check_is_fitted(self) + pred = self.regressor_.predict(X, **predict_params) + if pred.ndim == 1: + pred_trans = self.transformer_.inverse_transform(pred.reshape(-1, 1)) + else: + pred_trans = self.transformer_.inverse_transform(pred) + if ( + self._training_dim == 1 + and pred_trans.ndim == 2 + and pred_trans.shape[1] == 1 + ): + pred_trans = pred_trans.squeeze(axis=1) + + return pred_trans + + def _more_tags(self): + regressor = self.regressor + if regressor is None: + from ..linear_model import LinearRegression + + regressor = LinearRegression() + + return { + "poor_score": True, + "multioutput": _safe_tags(regressor, key="multioutput"), + } + + @property + def n_features_in_(self): + """Number of features seen during :term:`fit`.""" + # For consistency with other estimators we raise a AttributeError so + # that hasattr() returns False the estimator isn't fitted. + try: + check_is_fitted(self) + except NotFittedError as nfe: + raise AttributeError( + "{} object has no n_features_in_ attribute.".format( + self.__class__.__name__ + ) + ) from nfe + + return self.regressor_.n_features_in_ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/compose/tests/__init__.py b/env-llmeval/lib/python3.10/site-packages/sklearn/compose/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/compose/tests/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/compose/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bd81419b8839c977e3dea828061125460ec9478c Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/compose/tests/__pycache__/__init__.cpython-310.pyc differ diff --git 
a/env-llmeval/lib/python3.10/site-packages/sklearn/compose/tests/__pycache__/test_column_transformer.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/compose/tests/__pycache__/test_column_transformer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..421692f98d29940df49274c548548f5116fe365f Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/compose/tests/__pycache__/test_column_transformer.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/compose/tests/__pycache__/test_target.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/compose/tests/__pycache__/test_target.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2d588648a4ce0a4d16f66630abc2769e7aced8f1 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/compose/tests/__pycache__/test_target.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/compose/tests/test_column_transformer.py b/env-llmeval/lib/python3.10/site-packages/sklearn/compose/tests/test_column_transformer.py new file mode 100644 index 0000000000000000000000000000000000000000..e21c1a17010efcba076b4758eb024e0ef760b073 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/compose/tests/test_column_transformer.py @@ -0,0 +1,2582 @@ +""" +Test the ColumnTransformer. 
+""" + +import pickle +import re +import warnings + +import numpy as np +import pytest +from numpy.testing import assert_allclose +from scipy import sparse + +from sklearn.base import BaseEstimator, TransformerMixin +from sklearn.compose import ( + ColumnTransformer, + make_column_selector, + make_column_transformer, +) +from sklearn.exceptions import NotFittedError +from sklearn.feature_selection import VarianceThreshold +from sklearn.preprocessing import ( + FunctionTransformer, + Normalizer, + OneHotEncoder, + StandardScaler, +) +from sklearn.tests.metadata_routing_common import ( + ConsumingTransformer, + _Registry, + check_recorded_metadata, +) +from sklearn.utils._testing import ( + _convert_container, + assert_allclose_dense_sparse, + assert_almost_equal, + assert_array_equal, +) +from sklearn.utils.fixes import CSR_CONTAINERS + + +class Trans(TransformerMixin, BaseEstimator): + def fit(self, X, y=None): + return self + + def transform(self, X, y=None): + # 1D Series -> 2D DataFrame + if hasattr(X, "to_frame"): + return X.to_frame() + # 1D array -> 2D array + if getattr(X, "ndim", 2) == 1: + return np.atleast_2d(X).T + return X + + +class DoubleTrans(BaseEstimator): + def fit(self, X, y=None): + return self + + def transform(self, X): + return 2 * X + + +class SparseMatrixTrans(BaseEstimator): + def __init__(self, csr_container): + self.csr_container = csr_container + + def fit(self, X, y=None): + return self + + def transform(self, X, y=None): + n_samples = len(X) + return self.csr_container(sparse.eye(n_samples, n_samples)) + + +class TransNo2D(BaseEstimator): + def fit(self, X, y=None): + return self + + def transform(self, X, y=None): + return X + + +class TransRaise(BaseEstimator): + def fit(self, X, y=None): + raise ValueError("specific message") + + def transform(self, X, y=None): + raise ValueError("specific message") + + +def test_column_transformer(): + X_array = np.array([[0, 1, 2], [2, 4, 6]]).T + + X_res_first1D = np.array([0, 1, 2]) + 
X_res_second1D = np.array([2, 4, 6]) + X_res_first = X_res_first1D.reshape(-1, 1) + X_res_both = X_array + + cases = [ + # single column 1D / 2D + (0, X_res_first), + ([0], X_res_first), + # list-like + ([0, 1], X_res_both), + (np.array([0, 1]), X_res_both), + # slice + (slice(0, 1), X_res_first), + (slice(0, 2), X_res_both), + # boolean mask + (np.array([True, False]), X_res_first), + ([True, False], X_res_first), + (np.array([True, True]), X_res_both), + ([True, True], X_res_both), + ] + + for selection, res in cases: + ct = ColumnTransformer([("trans", Trans(), selection)], remainder="drop") + assert_array_equal(ct.fit_transform(X_array), res) + assert_array_equal(ct.fit(X_array).transform(X_array), res) + + # callable that returns any of the allowed specifiers + ct = ColumnTransformer( + [("trans", Trans(), lambda x: selection)], remainder="drop" + ) + assert_array_equal(ct.fit_transform(X_array), res) + assert_array_equal(ct.fit(X_array).transform(X_array), res) + + ct = ColumnTransformer([("trans1", Trans(), [0]), ("trans2", Trans(), [1])]) + assert_array_equal(ct.fit_transform(X_array), X_res_both) + assert_array_equal(ct.fit(X_array).transform(X_array), X_res_both) + assert len(ct.transformers_) == 2 + + # test with transformer_weights + transformer_weights = {"trans1": 0.1, "trans2": 10} + both = ColumnTransformer( + [("trans1", Trans(), [0]), ("trans2", Trans(), [1])], + transformer_weights=transformer_weights, + ) + res = np.vstack( + [ + transformer_weights["trans1"] * X_res_first1D, + transformer_weights["trans2"] * X_res_second1D, + ] + ).T + assert_array_equal(both.fit_transform(X_array), res) + assert_array_equal(both.fit(X_array).transform(X_array), res) + assert len(both.transformers_) == 2 + + both = ColumnTransformer( + [("trans", Trans(), [0, 1])], transformer_weights={"trans": 0.1} + ) + assert_array_equal(both.fit_transform(X_array), 0.1 * X_res_both) + assert_array_equal(both.fit(X_array).transform(X_array), 0.1 * X_res_both) + assert 
len(both.transformers_) == 1 + + +def test_column_transformer_tuple_transformers_parameter(): + X_array = np.array([[0, 1, 2], [2, 4, 6]]).T + + transformers = [("trans1", Trans(), [0]), ("trans2", Trans(), [1])] + + ct_with_list = ColumnTransformer(transformers) + ct_with_tuple = ColumnTransformer(tuple(transformers)) + + assert_array_equal( + ct_with_list.fit_transform(X_array), ct_with_tuple.fit_transform(X_array) + ) + assert_array_equal( + ct_with_list.fit(X_array).transform(X_array), + ct_with_tuple.fit(X_array).transform(X_array), + ) + + +@pytest.mark.parametrize("constructor_name", ["dataframe", "polars"]) +def test_column_transformer_dataframe(constructor_name): + if constructor_name == "dataframe": + dataframe_lib = pytest.importorskip("pandas") + else: + dataframe_lib = pytest.importorskip(constructor_name) + + X_array = np.array([[0, 1, 2], [2, 4, 6]]).T + X_df = _convert_container( + X_array, constructor_name, columns_name=["first", "second"] + ) + + X_res_first = np.array([0, 1, 2]).reshape(-1, 1) + X_res_both = X_array + + cases = [ + # String keys: label based + # list + (["first"], X_res_first), + (["first", "second"], X_res_both), + # slice + (slice("first", "second"), X_res_both), + # int keys: positional + # list + ([0], X_res_first), + ([0, 1], X_res_both), + (np.array([0, 1]), X_res_both), + # slice + (slice(0, 1), X_res_first), + (slice(0, 2), X_res_both), + # boolean mask + (np.array([True, False]), X_res_first), + ([True, False], X_res_first), + ] + if constructor_name == "dataframe": + # Scalars are only supported for pandas dataframes. 
+ cases.extend( + [ + # scalar + (0, X_res_first), + ("first", X_res_first), + ( + dataframe_lib.Series([True, False], index=["first", "second"]), + X_res_first, + ), + ] + ) + + for selection, res in cases: + ct = ColumnTransformer([("trans", Trans(), selection)], remainder="drop") + assert_array_equal(ct.fit_transform(X_df), res) + assert_array_equal(ct.fit(X_df).transform(X_df), res) + + # callable that returns any of the allowed specifiers + ct = ColumnTransformer( + [("trans", Trans(), lambda X: selection)], remainder="drop" + ) + assert_array_equal(ct.fit_transform(X_df), res) + assert_array_equal(ct.fit(X_df).transform(X_df), res) + + ct = ColumnTransformer( + [("trans1", Trans(), ["first"]), ("trans2", Trans(), ["second"])] + ) + assert_array_equal(ct.fit_transform(X_df), X_res_both) + assert_array_equal(ct.fit(X_df).transform(X_df), X_res_both) + assert len(ct.transformers_) == 2 + assert ct.transformers_[-1][0] != "remainder" + + ct = ColumnTransformer([("trans1", Trans(), [0]), ("trans2", Trans(), [1])]) + assert_array_equal(ct.fit_transform(X_df), X_res_both) + assert_array_equal(ct.fit(X_df).transform(X_df), X_res_both) + assert len(ct.transformers_) == 2 + assert ct.transformers_[-1][0] != "remainder" + + # test with transformer_weights + transformer_weights = {"trans1": 0.1, "trans2": 10} + both = ColumnTransformer( + [("trans1", Trans(), ["first"]), ("trans2", Trans(), ["second"])], + transformer_weights=transformer_weights, + ) + res = np.vstack( + [ + transformer_weights["trans1"] * X_df["first"], + transformer_weights["trans2"] * X_df["second"], + ] + ).T + assert_array_equal(both.fit_transform(X_df), res) + assert_array_equal(both.fit(X_df).transform(X_df), res) + assert len(both.transformers_) == 2 + assert both.transformers_[-1][0] != "remainder" + + # test multiple columns + both = ColumnTransformer( + [("trans", Trans(), ["first", "second"])], transformer_weights={"trans": 0.1} + ) + assert_array_equal(both.fit_transform(X_df), 0.1 * 
X_res_both) + assert_array_equal(both.fit(X_df).transform(X_df), 0.1 * X_res_both) + assert len(both.transformers_) == 1 + assert both.transformers_[-1][0] != "remainder" + + both = ColumnTransformer( + [("trans", Trans(), [0, 1])], transformer_weights={"trans": 0.1} + ) + assert_array_equal(both.fit_transform(X_df), 0.1 * X_res_both) + assert_array_equal(both.fit(X_df).transform(X_df), 0.1 * X_res_both) + assert len(both.transformers_) == 1 + assert both.transformers_[-1][0] != "remainder" + + # ensure pandas object is passed through + + class TransAssert(BaseEstimator): + def __init__(self, expected_type_transform): + self.expected_type_transform = expected_type_transform + + def fit(self, X, y=None): + return self + + def transform(self, X, y=None): + assert isinstance(X, self.expected_type_transform) + if isinstance(X, dataframe_lib.Series): + X = X.to_frame() + return X + + ct = ColumnTransformer( + [ + ( + "trans", + TransAssert(expected_type_transform=dataframe_lib.DataFrame), + ["first", "second"], + ) + ] + ) + ct.fit_transform(X_df) + + if constructor_name == "dataframe": + # DataFrame protocol does not have 1d columns, so we only test on Pandas + # dataframes. 
+ ct = ColumnTransformer( + [ + ( + "trans", + TransAssert(expected_type_transform=dataframe_lib.Series), + "first", + ) + ], + remainder="drop", + ) + ct.fit_transform(X_df) + + # Only test on pandas because the dataframe protocol requires string column + # names + # integer column spec + integer column names -> still use positional + X_df2 = X_df.copy() + X_df2.columns = [1, 0] + ct = ColumnTransformer([("trans", Trans(), 0)], remainder="drop") + assert_array_equal(ct.fit_transform(X_df2), X_res_first) + assert_array_equal(ct.fit(X_df2).transform(X_df2), X_res_first) + + assert len(ct.transformers_) == 2 + assert ct.transformers_[-1][0] == "remainder" + assert ct.transformers_[-1][1] == "drop" + assert_array_equal(ct.transformers_[-1][2], [1]) + + +@pytest.mark.parametrize("pandas", [True, False], ids=["pandas", "numpy"]) +@pytest.mark.parametrize( + "column_selection", + [[], np.array([False, False]), [False, False]], + ids=["list", "bool", "bool_int"], +) +@pytest.mark.parametrize("callable_column", [False, True]) +def test_column_transformer_empty_columns(pandas, column_selection, callable_column): + # test case that ensures that the column transformer does also work when + # a given transformer doesn't have any columns to work on + X_array = np.array([[0, 1, 2], [2, 4, 6]]).T + X_res_both = X_array + + if pandas: + pd = pytest.importorskip("pandas") + X = pd.DataFrame(X_array, columns=["first", "second"]) + else: + X = X_array + + if callable_column: + column = lambda X: column_selection # noqa + else: + column = column_selection + + ct = ColumnTransformer( + [("trans1", Trans(), [0, 1]), ("trans2", TransRaise(), column)] + ) + assert_array_equal(ct.fit_transform(X), X_res_both) + assert_array_equal(ct.fit(X).transform(X), X_res_both) + assert len(ct.transformers_) == 2 + assert isinstance(ct.transformers_[1][1], TransRaise) + + ct = ColumnTransformer( + [("trans1", TransRaise(), column), ("trans2", Trans(), [0, 1])] + ) + 
assert_array_equal(ct.fit_transform(X), X_res_both) + assert_array_equal(ct.fit(X).transform(X), X_res_both) + assert len(ct.transformers_) == 2 + assert isinstance(ct.transformers_[0][1], TransRaise) + + ct = ColumnTransformer([("trans", TransRaise(), column)], remainder="passthrough") + assert_array_equal(ct.fit_transform(X), X_res_both) + assert_array_equal(ct.fit(X).transform(X), X_res_both) + assert len(ct.transformers_) == 2 # including remainder + assert isinstance(ct.transformers_[0][1], TransRaise) + + fixture = np.array([[], [], []]) + ct = ColumnTransformer([("trans", TransRaise(), column)], remainder="drop") + assert_array_equal(ct.fit_transform(X), fixture) + assert_array_equal(ct.fit(X).transform(X), fixture) + assert len(ct.transformers_) == 2 # including remainder + assert isinstance(ct.transformers_[0][1], TransRaise) + + +def test_column_transformer_output_indices(): + # Checks for the output_indices_ attribute + X_array = np.arange(6).reshape(3, 2) + + ct = ColumnTransformer([("trans1", Trans(), [0]), ("trans2", Trans(), [1])]) + X_trans = ct.fit_transform(X_array) + assert ct.output_indices_ == { + "trans1": slice(0, 1), + "trans2": slice(1, 2), + "remainder": slice(0, 0), + } + assert_array_equal(X_trans[:, [0]], X_trans[:, ct.output_indices_["trans1"]]) + assert_array_equal(X_trans[:, [1]], X_trans[:, ct.output_indices_["trans2"]]) + + # test with transformer_weights and multiple columns + ct = ColumnTransformer( + [("trans", Trans(), [0, 1])], transformer_weights={"trans": 0.1} + ) + X_trans = ct.fit_transform(X_array) + assert ct.output_indices_ == {"trans": slice(0, 2), "remainder": slice(0, 0)} + assert_array_equal(X_trans[:, [0, 1]], X_trans[:, ct.output_indices_["trans"]]) + assert_array_equal(X_trans[:, []], X_trans[:, ct.output_indices_["remainder"]]) + + # test case that ensures that the attribute does also work when + # a given transformer doesn't have any columns to work on + ct = ColumnTransformer([("trans1", Trans(), [0, 1]), 
("trans2", TransRaise(), [])]) + X_trans = ct.fit_transform(X_array) + assert ct.output_indices_ == { + "trans1": slice(0, 2), + "trans2": slice(0, 0), + "remainder": slice(0, 0), + } + assert_array_equal(X_trans[:, [0, 1]], X_trans[:, ct.output_indices_["trans1"]]) + assert_array_equal(X_trans[:, []], X_trans[:, ct.output_indices_["trans2"]]) + assert_array_equal(X_trans[:, []], X_trans[:, ct.output_indices_["remainder"]]) + + ct = ColumnTransformer([("trans", TransRaise(), [])], remainder="passthrough") + X_trans = ct.fit_transform(X_array) + assert ct.output_indices_ == {"trans": slice(0, 0), "remainder": slice(0, 2)} + assert_array_equal(X_trans[:, []], X_trans[:, ct.output_indices_["trans"]]) + assert_array_equal(X_trans[:, [0, 1]], X_trans[:, ct.output_indices_["remainder"]]) + + +def test_column_transformer_output_indices_df(): + # Checks for the output_indices_ attribute with data frames + pd = pytest.importorskip("pandas") + + X_df = pd.DataFrame(np.arange(6).reshape(3, 2), columns=["first", "second"]) + + ct = ColumnTransformer( + [("trans1", Trans(), ["first"]), ("trans2", Trans(), ["second"])] + ) + X_trans = ct.fit_transform(X_df) + assert ct.output_indices_ == { + "trans1": slice(0, 1), + "trans2": slice(1, 2), + "remainder": slice(0, 0), + } + assert_array_equal(X_trans[:, [0]], X_trans[:, ct.output_indices_["trans1"]]) + assert_array_equal(X_trans[:, [1]], X_trans[:, ct.output_indices_["trans2"]]) + assert_array_equal(X_trans[:, []], X_trans[:, ct.output_indices_["remainder"]]) + + ct = ColumnTransformer([("trans1", Trans(), [0]), ("trans2", Trans(), [1])]) + X_trans = ct.fit_transform(X_df) + assert ct.output_indices_ == { + "trans1": slice(0, 1), + "trans2": slice(1, 2), + "remainder": slice(0, 0), + } + assert_array_equal(X_trans[:, [0]], X_trans[:, ct.output_indices_["trans1"]]) + assert_array_equal(X_trans[:, [1]], X_trans[:, ct.output_indices_["trans2"]]) + assert_array_equal(X_trans[:, []], X_trans[:, ct.output_indices_["remainder"]]) + + 
+@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_column_transformer_sparse_array(csr_container): + X_sparse = csr_container(sparse.eye(3, 2)) + + # no distinction between 1D and 2D + X_res_first = X_sparse[:, [0]] + X_res_both = X_sparse + + for col in [(0,), [0], slice(0, 1)]: + for remainder, res in [("drop", X_res_first), ("passthrough", X_res_both)]: + ct = ColumnTransformer( + [("trans", Trans(), col)], remainder=remainder, sparse_threshold=0.8 + ) + assert sparse.issparse(ct.fit_transform(X_sparse)) + assert_allclose_dense_sparse(ct.fit_transform(X_sparse), res) + assert_allclose_dense_sparse(ct.fit(X_sparse).transform(X_sparse), res) + + for col in [[0, 1], slice(0, 2)]: + ct = ColumnTransformer([("trans", Trans(), col)], sparse_threshold=0.8) + assert sparse.issparse(ct.fit_transform(X_sparse)) + assert_allclose_dense_sparse(ct.fit_transform(X_sparse), X_res_both) + assert_allclose_dense_sparse(ct.fit(X_sparse).transform(X_sparse), X_res_both) + + +def test_column_transformer_list(): + X_list = [[1, float("nan"), "a"], [0, 0, "b"]] + expected_result = np.array( + [ + [1, float("nan"), 1, 0], + [-1, 0, 0, 1], + ] + ) + + ct = ColumnTransformer( + [ + ("numerical", StandardScaler(), [0, 1]), + ("categorical", OneHotEncoder(), [2]), + ] + ) + + assert_array_equal(ct.fit_transform(X_list), expected_result) + assert_array_equal(ct.fit(X_list).transform(X_list), expected_result) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_column_transformer_sparse_stacking(csr_container): + X_array = np.array([[0, 1, 2], [2, 4, 6]]).T + col_trans = ColumnTransformer( + [("trans1", Trans(), [0]), ("trans2", SparseMatrixTrans(csr_container), 1)], + sparse_threshold=0.8, + ) + col_trans.fit(X_array) + X_trans = col_trans.transform(X_array) + assert sparse.issparse(X_trans) + assert X_trans.shape == (X_trans.shape[0], X_trans.shape[0] + 1) + assert_array_equal(X_trans.toarray()[:, 1:], np.eye(X_trans.shape[0])) + assert 
len(col_trans.transformers_) == 2 + assert col_trans.transformers_[-1][0] != "remainder" + + col_trans = ColumnTransformer( + [("trans1", Trans(), [0]), ("trans2", SparseMatrixTrans(csr_container), 1)], + sparse_threshold=0.1, + ) + col_trans.fit(X_array) + X_trans = col_trans.transform(X_array) + assert not sparse.issparse(X_trans) + assert X_trans.shape == (X_trans.shape[0], X_trans.shape[0] + 1) + assert_array_equal(X_trans[:, 1:], np.eye(X_trans.shape[0])) + + +def test_column_transformer_mixed_cols_sparse(): + df = np.array([["a", 1, True], ["b", 2, False]], dtype="O") + + ct = make_column_transformer( + (OneHotEncoder(), [0]), ("passthrough", [1, 2]), sparse_threshold=1.0 + ) + + # this shouldn't fail, since boolean can be coerced into a numeric + # See: https://github.com/scikit-learn/scikit-learn/issues/11912 + X_trans = ct.fit_transform(df) + assert X_trans.getformat() == "csr" + assert_array_equal(X_trans.toarray(), np.array([[1, 0, 1, 1], [0, 1, 2, 0]])) + + ct = make_column_transformer( + (OneHotEncoder(), [0]), ("passthrough", [0]), sparse_threshold=1.0 + ) + with pytest.raises(ValueError, match="For a sparse output, all columns should"): + # this fails since strings `a` and `b` cannot be + # coerced into a numeric. 
+ ct.fit_transform(df) + + +def test_column_transformer_sparse_threshold(): + X_array = np.array([["a", "b"], ["A", "B"]], dtype=object).T + # above data has sparsity of 4 / 8 = 0.5 + + # apply threshold even if all sparse + col_trans = ColumnTransformer( + [("trans1", OneHotEncoder(), [0]), ("trans2", OneHotEncoder(), [1])], + sparse_threshold=0.2, + ) + res = col_trans.fit_transform(X_array) + assert not sparse.issparse(res) + assert not col_trans.sparse_output_ + + # mixed -> sparsity of (4 + 2) / 8 = 0.75 + for thres in [0.75001, 1]: + col_trans = ColumnTransformer( + [ + ("trans1", OneHotEncoder(sparse_output=True), [0]), + ("trans2", OneHotEncoder(sparse_output=False), [1]), + ], + sparse_threshold=thres, + ) + res = col_trans.fit_transform(X_array) + assert sparse.issparse(res) + assert col_trans.sparse_output_ + + for thres in [0.75, 0]: + col_trans = ColumnTransformer( + [ + ("trans1", OneHotEncoder(sparse_output=True), [0]), + ("trans2", OneHotEncoder(sparse_output=False), [1]), + ], + sparse_threshold=thres, + ) + res = col_trans.fit_transform(X_array) + assert not sparse.issparse(res) + assert not col_trans.sparse_output_ + + # if nothing is sparse -> no sparse + for thres in [0.33, 0, 1]: + col_trans = ColumnTransformer( + [ + ("trans1", OneHotEncoder(sparse_output=False), [0]), + ("trans2", OneHotEncoder(sparse_output=False), [1]), + ], + sparse_threshold=thres, + ) + res = col_trans.fit_transform(X_array) + assert not sparse.issparse(res) + assert not col_trans.sparse_output_ + + +def test_column_transformer_error_msg_1D(): + X_array = np.array([[0.0, 1.0, 2.0], [2.0, 4.0, 6.0]]).T + + col_trans = ColumnTransformer([("trans", StandardScaler(), 0)]) + msg = "1D data passed to a transformer" + with pytest.raises(ValueError, match=msg): + col_trans.fit(X_array) + + with pytest.raises(ValueError, match=msg): + col_trans.fit_transform(X_array) + + col_trans = ColumnTransformer([("trans", TransRaise(), 0)]) + for func in [col_trans.fit, 
col_trans.fit_transform]: + with pytest.raises(ValueError, match="specific message"): + func(X_array) + + +def test_2D_transformer_output(): + X_array = np.array([[0, 1, 2], [2, 4, 6]]).T + + # if one transformer is dropped, test that name is still correct + ct = ColumnTransformer([("trans1", "drop", 0), ("trans2", TransNo2D(), 1)]) + + msg = "the 'trans2' transformer should be 2D" + with pytest.raises(ValueError, match=msg): + ct.fit_transform(X_array) + # because fit is also doing transform, this raises already on fit + with pytest.raises(ValueError, match=msg): + ct.fit(X_array) + + +def test_2D_transformer_output_pandas(): + pd = pytest.importorskip("pandas") + + X_array = np.array([[0, 1, 2], [2, 4, 6]]).T + X_df = pd.DataFrame(X_array, columns=["col1", "col2"]) + + # if one transformer is dropped, test that name is still correct + ct = ColumnTransformer([("trans1", TransNo2D(), "col1")]) + msg = "the 'trans1' transformer should be 2D" + with pytest.raises(ValueError, match=msg): + ct.fit_transform(X_df) + # because fit is also doing transform, this raises already on fit + with pytest.raises(ValueError, match=msg): + ct.fit(X_df) + + +@pytest.mark.parametrize("remainder", ["drop", "passthrough"]) +def test_column_transformer_invalid_columns(remainder): + X_array = np.array([[0, 1, 2], [2, 4, 6]]).T + + # general invalid + for col in [1.5, ["string", 1], slice(1, "s"), np.array([1.0])]: + ct = ColumnTransformer([("trans", Trans(), col)], remainder=remainder) + with pytest.raises(ValueError, match="No valid specification"): + ct.fit(X_array) + + # invalid for arrays + for col in ["string", ["string", "other"], slice("a", "b")]: + ct = ColumnTransformer([("trans", Trans(), col)], remainder=remainder) + with pytest.raises(ValueError, match="Specifying the columns"): + ct.fit(X_array) + + # transformed n_features does not match fitted n_features + col = [0, 1] + ct = ColumnTransformer([("trans", Trans(), col)], remainder=remainder) + ct.fit(X_array) + X_array_more 
= np.array([[0, 1, 2], [2, 4, 6], [3, 6, 9]]).T + msg = "X has 3 features, but ColumnTransformer is expecting 2 features as input." + with pytest.raises(ValueError, match=msg): + ct.transform(X_array_more) + X_array_fewer = np.array( + [ + [0, 1, 2], + ] + ).T + err_msg = ( + "X has 1 features, but ColumnTransformer is expecting 2 features as input." + ) + with pytest.raises(ValueError, match=err_msg): + ct.transform(X_array_fewer) + + +def test_column_transformer_invalid_transformer(): + class NoTrans(BaseEstimator): + def fit(self, X, y=None): + return self + + def predict(self, X): + return X + + X_array = np.array([[0, 1, 2], [2, 4, 6]]).T + ct = ColumnTransformer([("trans", NoTrans(), [0])]) + msg = "All estimators should implement fit and transform" + with pytest.raises(TypeError, match=msg): + ct.fit(X_array) + + +def test_make_column_transformer(): + scaler = StandardScaler() + norm = Normalizer() + ct = make_column_transformer((scaler, "first"), (norm, ["second"])) + names, transformers, columns = zip(*ct.transformers) + assert names == ("standardscaler", "normalizer") + assert transformers == (scaler, norm) + assert columns == ("first", ["second"]) + + +def test_make_column_transformer_pandas(): + pd = pytest.importorskip("pandas") + X_array = np.array([[0, 1, 2], [2, 4, 6]]).T + X_df = pd.DataFrame(X_array, columns=["first", "second"]) + norm = Normalizer() + ct1 = ColumnTransformer([("norm", Normalizer(), X_df.columns)]) + ct2 = make_column_transformer((norm, X_df.columns)) + assert_almost_equal(ct1.fit_transform(X_df), ct2.fit_transform(X_df)) + + +def test_make_column_transformer_kwargs(): + scaler = StandardScaler() + norm = Normalizer() + ct = make_column_transformer( + (scaler, "first"), + (norm, ["second"]), + n_jobs=3, + remainder="drop", + sparse_threshold=0.5, + ) + assert ( + ct.transformers + == make_column_transformer((scaler, "first"), (norm, ["second"])).transformers + ) + assert ct.n_jobs == 3 + assert ct.remainder == "drop" + assert 
ct.sparse_threshold == 0.5 + # invalid keyword parameters should raise an error message + msg = re.escape( + "make_column_transformer() got an unexpected " + "keyword argument 'transformer_weights'" + ) + with pytest.raises(TypeError, match=msg): + make_column_transformer( + (scaler, "first"), + (norm, ["second"]), + transformer_weights={"pca": 10, "Transf": 1}, + ) + + +def test_make_column_transformer_remainder_transformer(): + scaler = StandardScaler() + norm = Normalizer() + remainder = StandardScaler() + ct = make_column_transformer( + (scaler, "first"), (norm, ["second"]), remainder=remainder + ) + assert ct.remainder == remainder + + +def test_column_transformer_get_set_params(): + ct = ColumnTransformer( + [("trans1", StandardScaler(), [0]), ("trans2", StandardScaler(), [1])] + ) + + exp = { + "n_jobs": None, + "remainder": "drop", + "sparse_threshold": 0.3, + "trans1": ct.transformers[0][1], + "trans1__copy": True, + "trans1__with_mean": True, + "trans1__with_std": True, + "trans2": ct.transformers[1][1], + "trans2__copy": True, + "trans2__with_mean": True, + "trans2__with_std": True, + "transformers": ct.transformers, + "transformer_weights": None, + "verbose_feature_names_out": True, + "verbose": False, + } + + assert ct.get_params() == exp + + ct.set_params(trans1__with_mean=False) + assert not ct.get_params()["trans1__with_mean"] + + ct.set_params(trans1="passthrough") + exp = { + "n_jobs": None, + "remainder": "drop", + "sparse_threshold": 0.3, + "trans1": "passthrough", + "trans2": ct.transformers[1][1], + "trans2__copy": True, + "trans2__with_mean": True, + "trans2__with_std": True, + "transformers": ct.transformers, + "transformer_weights": None, + "verbose_feature_names_out": True, + "verbose": False, + } + + assert ct.get_params() == exp + + +def test_column_transformer_named_estimators(): + X_array = np.array([[0.0, 1.0, 2.0], [2.0, 4.0, 6.0]]).T + ct = ColumnTransformer( + [ + ("trans1", StandardScaler(), [0]), + ("trans2", 
StandardScaler(with_std=False), [1]), + ] + ) + assert not hasattr(ct, "transformers_") + ct.fit(X_array) + assert hasattr(ct, "transformers_") + assert isinstance(ct.named_transformers_["trans1"], StandardScaler) + assert isinstance(ct.named_transformers_.trans1, StandardScaler) + assert isinstance(ct.named_transformers_["trans2"], StandardScaler) + assert isinstance(ct.named_transformers_.trans2, StandardScaler) + assert not ct.named_transformers_.trans2.with_std + # check it are fitted transformers + assert ct.named_transformers_.trans1.mean_ == 1.0 + + +def test_column_transformer_cloning(): + X_array = np.array([[0.0, 1.0, 2.0], [2.0, 4.0, 6.0]]).T + + ct = ColumnTransformer([("trans", StandardScaler(), [0])]) + ct.fit(X_array) + assert not hasattr(ct.transformers[0][1], "mean_") + assert hasattr(ct.transformers_[0][1], "mean_") + + ct = ColumnTransformer([("trans", StandardScaler(), [0])]) + ct.fit_transform(X_array) + assert not hasattr(ct.transformers[0][1], "mean_") + assert hasattr(ct.transformers_[0][1], "mean_") + + +def test_column_transformer_get_feature_names(): + X_array = np.array([[0.0, 1.0, 2.0], [2.0, 4.0, 6.0]]).T + ct = ColumnTransformer([("trans", Trans(), [0, 1])]) + # raise correct error when not fitted + with pytest.raises(NotFittedError): + ct.get_feature_names_out() + # raise correct error when no feature names are available + ct.fit(X_array) + msg = re.escape( + "Transformer trans (type Trans) does not provide get_feature_names_out" + ) + with pytest.raises(AttributeError, match=msg): + ct.get_feature_names_out() + + +def test_column_transformer_special_strings(): + # one 'drop' -> ignore + X_array = np.array([[0.0, 1.0, 2.0], [2.0, 4.0, 6.0]]).T + ct = ColumnTransformer([("trans1", Trans(), [0]), ("trans2", "drop", [1])]) + exp = np.array([[0.0], [1.0], [2.0]]) + assert_array_equal(ct.fit_transform(X_array), exp) + assert_array_equal(ct.fit(X_array).transform(X_array), exp) + assert len(ct.transformers_) == 2 + assert 
ct.transformers_[-1][0] != "remainder" + + # all 'drop' -> return shape 0 array + ct = ColumnTransformer([("trans1", "drop", [0]), ("trans2", "drop", [1])]) + assert_array_equal(ct.fit(X_array).transform(X_array).shape, (3, 0)) + assert_array_equal(ct.fit_transform(X_array).shape, (3, 0)) + assert len(ct.transformers_) == 2 + assert ct.transformers_[-1][0] != "remainder" + + # 'passthrough' + X_array = np.array([[0.0, 1.0, 2.0], [2.0, 4.0, 6.0]]).T + ct = ColumnTransformer([("trans1", Trans(), [0]), ("trans2", "passthrough", [1])]) + exp = X_array + assert_array_equal(ct.fit_transform(X_array), exp) + assert_array_equal(ct.fit(X_array).transform(X_array), exp) + assert len(ct.transformers_) == 2 + assert ct.transformers_[-1][0] != "remainder" + + +def test_column_transformer_remainder(): + X_array = np.array([[0, 1, 2], [2, 4, 6]]).T + + X_res_first = np.array([0, 1, 2]).reshape(-1, 1) + X_res_second = np.array([2, 4, 6]).reshape(-1, 1) + X_res_both = X_array + + # default drop + ct = ColumnTransformer([("trans1", Trans(), [0])]) + assert_array_equal(ct.fit_transform(X_array), X_res_first) + assert_array_equal(ct.fit(X_array).transform(X_array), X_res_first) + assert len(ct.transformers_) == 2 + assert ct.transformers_[-1][0] == "remainder" + assert ct.transformers_[-1][1] == "drop" + assert_array_equal(ct.transformers_[-1][2], [1]) + + # specify passthrough + ct = ColumnTransformer([("trans", Trans(), [0])], remainder="passthrough") + assert_array_equal(ct.fit_transform(X_array), X_res_both) + assert_array_equal(ct.fit(X_array).transform(X_array), X_res_both) + assert len(ct.transformers_) == 2 + assert ct.transformers_[-1][0] == "remainder" + assert isinstance(ct.transformers_[-1][1], FunctionTransformer) + assert_array_equal(ct.transformers_[-1][2], [1]) + + # column order is not preserved (passed through added to end) + ct = ColumnTransformer([("trans1", Trans(), [1])], remainder="passthrough") + assert_array_equal(ct.fit_transform(X_array), X_res_both[:, 
::-1]) + assert_array_equal(ct.fit(X_array).transform(X_array), X_res_both[:, ::-1]) + assert len(ct.transformers_) == 2 + assert ct.transformers_[-1][0] == "remainder" + assert isinstance(ct.transformers_[-1][1], FunctionTransformer) + assert_array_equal(ct.transformers_[-1][2], [0]) + + # passthrough when all actual transformers are skipped + ct = ColumnTransformer([("trans1", "drop", [0])], remainder="passthrough") + assert_array_equal(ct.fit_transform(X_array), X_res_second) + assert_array_equal(ct.fit(X_array).transform(X_array), X_res_second) + assert len(ct.transformers_) == 2 + assert ct.transformers_[-1][0] == "remainder" + assert isinstance(ct.transformers_[-1][1], FunctionTransformer) + assert_array_equal(ct.transformers_[-1][2], [1]) + + # check default for make_column_transformer + ct = make_column_transformer((Trans(), [0])) + assert ct.remainder == "drop" + + +@pytest.mark.parametrize( + "key", [[0], np.array([0]), slice(0, 1), np.array([True, False])] +) +def test_column_transformer_remainder_numpy(key): + # test different ways that columns are specified with passthrough + X_array = np.array([[0, 1, 2], [2, 4, 6]]).T + X_res_both = X_array + + ct = ColumnTransformer([("trans1", Trans(), key)], remainder="passthrough") + assert_array_equal(ct.fit_transform(X_array), X_res_both) + assert_array_equal(ct.fit(X_array).transform(X_array), X_res_both) + assert len(ct.transformers_) == 2 + assert ct.transformers_[-1][0] == "remainder" + assert isinstance(ct.transformers_[-1][1], FunctionTransformer) + assert_array_equal(ct.transformers_[-1][2], [1]) + + +@pytest.mark.parametrize( + "key", + [ + [0], + slice(0, 1), + np.array([True, False]), + ["first"], + "pd-index", + np.array(["first"]), + np.array(["first"], dtype=object), + slice(None, "first"), + slice("first", "first"), + ], +) +def test_column_transformer_remainder_pandas(key): + # test different ways that columns are specified with passthrough + pd = pytest.importorskip("pandas") + if 
isinstance(key, str) and key == "pd-index": + key = pd.Index(["first"]) + + X_array = np.array([[0, 1, 2], [2, 4, 6]]).T + X_df = pd.DataFrame(X_array, columns=["first", "second"]) + X_res_both = X_array + + ct = ColumnTransformer([("trans1", Trans(), key)], remainder="passthrough") + assert_array_equal(ct.fit_transform(X_df), X_res_both) + assert_array_equal(ct.fit(X_df).transform(X_df), X_res_both) + assert len(ct.transformers_) == 2 + assert ct.transformers_[-1][0] == "remainder" + assert isinstance(ct.transformers_[-1][1], FunctionTransformer) + assert_array_equal(ct.transformers_[-1][2], [1]) + + +@pytest.mark.parametrize( + "key", [[0], np.array([0]), slice(0, 1), np.array([True, False, False])] +) +def test_column_transformer_remainder_transformer(key): + X_array = np.array([[0, 1, 2], [2, 4, 6], [8, 6, 4]]).T + X_res_both = X_array.copy() + + # second and third columns are doubled when remainder = DoubleTrans + X_res_both[:, 1:3] *= 2 + + ct = ColumnTransformer([("trans1", Trans(), key)], remainder=DoubleTrans()) + + assert_array_equal(ct.fit_transform(X_array), X_res_both) + assert_array_equal(ct.fit(X_array).transform(X_array), X_res_both) + assert len(ct.transformers_) == 2 + assert ct.transformers_[-1][0] == "remainder" + assert isinstance(ct.transformers_[-1][1], DoubleTrans) + assert_array_equal(ct.transformers_[-1][2], [1, 2]) + + +def test_column_transformer_no_remaining_remainder_transformer(): + X_array = np.array([[0, 1, 2], [2, 4, 6], [8, 6, 4]]).T + + ct = ColumnTransformer([("trans1", Trans(), [0, 1, 2])], remainder=DoubleTrans()) + + assert_array_equal(ct.fit_transform(X_array), X_array) + assert_array_equal(ct.fit(X_array).transform(X_array), X_array) + assert len(ct.transformers_) == 1 + assert ct.transformers_[-1][0] != "remainder" + + +def test_column_transformer_drops_all_remainder_transformer(): + X_array = np.array([[0, 1, 2], [2, 4, 6], [8, 6, 4]]).T + + # columns are doubled when remainder = DoubleTrans + X_res_both = 2 * 
X_array.copy()[:, 1:3] + + ct = ColumnTransformer([("trans1", "drop", [0])], remainder=DoubleTrans()) + + assert_array_equal(ct.fit_transform(X_array), X_res_both) + assert_array_equal(ct.fit(X_array).transform(X_array), X_res_both) + assert len(ct.transformers_) == 2 + assert ct.transformers_[-1][0] == "remainder" + assert isinstance(ct.transformers_[-1][1], DoubleTrans) + assert_array_equal(ct.transformers_[-1][2], [1, 2]) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_column_transformer_sparse_remainder_transformer(csr_container): + X_array = np.array([[0, 1, 2], [2, 4, 6], [8, 6, 4]]).T + + ct = ColumnTransformer( + [("trans1", Trans(), [0])], + remainder=SparseMatrixTrans(csr_container), + sparse_threshold=0.8, + ) + + X_trans = ct.fit_transform(X_array) + assert sparse.issparse(X_trans) + # SparseMatrixTrans creates 3 features for each column. There is + # one column in ``transformers``, thus: + assert X_trans.shape == (3, 3 + 1) + + exp_array = np.hstack((X_array[:, 0].reshape(-1, 1), np.eye(3))) + assert_array_equal(X_trans.toarray(), exp_array) + assert len(ct.transformers_) == 2 + assert ct.transformers_[-1][0] == "remainder" + assert isinstance(ct.transformers_[-1][1], SparseMatrixTrans) + assert_array_equal(ct.transformers_[-1][2], [1, 2]) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_column_transformer_drop_all_sparse_remainder_transformer(csr_container): + X_array = np.array([[0, 1, 2], [2, 4, 6], [8, 6, 4]]).T + ct = ColumnTransformer( + [("trans1", "drop", [0])], + remainder=SparseMatrixTrans(csr_container), + sparse_threshold=0.8, + ) + + X_trans = ct.fit_transform(X_array) + assert sparse.issparse(X_trans) + + # SparseMatrixTrans creates 3 features for each column, thus: + assert X_trans.shape == (3, 3) + assert_array_equal(X_trans.toarray(), np.eye(3)) + assert len(ct.transformers_) == 2 + assert ct.transformers_[-1][0] == "remainder" + assert isinstance(ct.transformers_[-1][1], 
SparseMatrixTrans) + assert_array_equal(ct.transformers_[-1][2], [1, 2]) + + +def test_column_transformer_get_set_params_with_remainder(): + ct = ColumnTransformer( + [("trans1", StandardScaler(), [0])], remainder=StandardScaler() + ) + + exp = { + "n_jobs": None, + "remainder": ct.remainder, + "remainder__copy": True, + "remainder__with_mean": True, + "remainder__with_std": True, + "sparse_threshold": 0.3, + "trans1": ct.transformers[0][1], + "trans1__copy": True, + "trans1__with_mean": True, + "trans1__with_std": True, + "transformers": ct.transformers, + "transformer_weights": None, + "verbose_feature_names_out": True, + "verbose": False, + } + + assert ct.get_params() == exp + + ct.set_params(remainder__with_std=False) + assert not ct.get_params()["remainder__with_std"] + + ct.set_params(trans1="passthrough") + exp = { + "n_jobs": None, + "remainder": ct.remainder, + "remainder__copy": True, + "remainder__with_mean": True, + "remainder__with_std": False, + "sparse_threshold": 0.3, + "trans1": "passthrough", + "transformers": ct.transformers, + "transformer_weights": None, + "verbose_feature_names_out": True, + "verbose": False, + } + assert ct.get_params() == exp + + +def test_column_transformer_no_estimators(): + X_array = np.array([[0, 1, 2], [2, 4, 6], [8, 6, 4]]).astype("float").T + ct = ColumnTransformer([], remainder=StandardScaler()) + + params = ct.get_params() + assert params["remainder__with_mean"] + + X_trans = ct.fit_transform(X_array) + assert X_trans.shape == X_array.shape + assert len(ct.transformers_) == 1 + assert ct.transformers_[-1][0] == "remainder" + assert ct.transformers_[-1][2] == [0, 1, 2] + + +@pytest.mark.parametrize( + ["est", "pattern"], + [ + ( + ColumnTransformer( + [("trans1", Trans(), [0]), ("trans2", Trans(), [1])], + remainder=DoubleTrans(), + ), + ( + r"\[ColumnTransformer\].*\(1 of 3\) Processing trans1.* total=.*\n" + r"\[ColumnTransformer\].*\(2 of 3\) Processing trans2.* total=.*\n" + r"\[ColumnTransformer\].*\(3 of 3\) 
Processing remainder.* total=.*\n$" + ), + ), + ( + ColumnTransformer( + [("trans1", Trans(), [0]), ("trans2", Trans(), [1])], + remainder="passthrough", + ), + ( + r"\[ColumnTransformer\].*\(1 of 3\) Processing trans1.* total=.*\n" + r"\[ColumnTransformer\].*\(2 of 3\) Processing trans2.* total=.*\n" + r"\[ColumnTransformer\].*\(3 of 3\) Processing remainder.* total=.*\n$" + ), + ), + ( + ColumnTransformer( + [("trans1", Trans(), [0]), ("trans2", "drop", [1])], + remainder="passthrough", + ), + ( + r"\[ColumnTransformer\].*\(1 of 2\) Processing trans1.* total=.*\n" + r"\[ColumnTransformer\].*\(2 of 2\) Processing remainder.* total=.*\n$" + ), + ), + ( + ColumnTransformer( + [("trans1", Trans(), [0]), ("trans2", "passthrough", [1])], + remainder="passthrough", + ), + ( + r"\[ColumnTransformer\].*\(1 of 3\) Processing trans1.* total=.*\n" + r"\[ColumnTransformer\].*\(2 of 3\) Processing trans2.* total=.*\n" + r"\[ColumnTransformer\].*\(3 of 3\) Processing remainder.* total=.*\n$" + ), + ), + ( + ColumnTransformer([("trans1", Trans(), [0])], remainder="passthrough"), + ( + r"\[ColumnTransformer\].*\(1 of 2\) Processing trans1.* total=.*\n" + r"\[ColumnTransformer\].*\(2 of 2\) Processing remainder.* total=.*\n$" + ), + ), + ( + ColumnTransformer( + [("trans1", Trans(), [0]), ("trans2", Trans(), [1])], remainder="drop" + ), + ( + r"\[ColumnTransformer\].*\(1 of 2\) Processing trans1.* total=.*\n" + r"\[ColumnTransformer\].*\(2 of 2\) Processing trans2.* total=.*\n$" + ), + ), + ( + ColumnTransformer([("trans1", Trans(), [0])], remainder="drop"), + r"\[ColumnTransformer\].*\(1 of 1\) Processing trans1.* total=.*\n$", + ), + ], +) +@pytest.mark.parametrize("method", ["fit", "fit_transform"]) +def test_column_transformer_verbose(est, pattern, method, capsys): + X_array = np.array([[0, 1, 2], [2, 4, 6], [8, 6, 4]]).T + + func = getattr(est, method) + est.set_params(verbose=False) + func(X_array) + assert not capsys.readouterr().out, "Got output for verbose=False" + + 
est.set_params(verbose=True) + func(X_array) + assert re.match(pattern, capsys.readouterr()[0]) + + +def test_column_transformer_no_estimators_set_params(): + ct = ColumnTransformer([]).set_params(n_jobs=2) + assert ct.n_jobs == 2 + + +def test_column_transformer_callable_specifier(): + # assert that function gets the full array + X_array = np.array([[0, 1, 2], [2, 4, 6]]).T + X_res_first = np.array([[0, 1, 2]]).T + + def func(X): + assert_array_equal(X, X_array) + return [0] + + ct = ColumnTransformer([("trans", Trans(), func)], remainder="drop") + assert_array_equal(ct.fit_transform(X_array), X_res_first) + assert_array_equal(ct.fit(X_array).transform(X_array), X_res_first) + assert callable(ct.transformers[0][2]) + assert ct.transformers_[0][2] == [0] + + +def test_column_transformer_callable_specifier_dataframe(): + # assert that function gets the full dataframe + pd = pytest.importorskip("pandas") + X_array = np.array([[0, 1, 2], [2, 4, 6]]).T + X_res_first = np.array([[0, 1, 2]]).T + + X_df = pd.DataFrame(X_array, columns=["first", "second"]) + + def func(X): + assert_array_equal(X.columns, X_df.columns) + assert_array_equal(X.values, X_df.values) + return ["first"] + + ct = ColumnTransformer([("trans", Trans(), func)], remainder="drop") + assert_array_equal(ct.fit_transform(X_df), X_res_first) + assert_array_equal(ct.fit(X_df).transform(X_df), X_res_first) + assert callable(ct.transformers[0][2]) + assert ct.transformers_[0][2] == ["first"] + + +def test_column_transformer_negative_column_indexes(): + X = np.random.randn(2, 2) + X_categories = np.array([[1], [2]]) + X = np.concatenate([X, X_categories], axis=1) + + ohe = OneHotEncoder() + + tf_1 = ColumnTransformer([("ohe", ohe, [-1])], remainder="passthrough") + tf_2 = ColumnTransformer([("ohe", ohe, [2])], remainder="passthrough") + assert_array_equal(tf_1.fit_transform(X), tf_2.fit_transform(X)) + + +@pytest.mark.parametrize("array_type", [np.asarray, *CSR_CONTAINERS]) +def 
test_column_transformer_mask_indexing(array_type): + # Regression test for #14510 + # Boolean array-like does not behave as boolean array with sparse matrices. + X = np.transpose([[1, 2, 3], [4, 5, 6], [5, 6, 7], [8, 9, 10]]) + X = array_type(X) + column_transformer = ColumnTransformer( + [("identity", FunctionTransformer(), [False, True, False, True])] + ) + X_trans = column_transformer.fit_transform(X) + assert X_trans.shape == (3, 2) + + +def test_n_features_in(): + # make sure n_features_in is what is passed as input to the column + # transformer. + + X = [[1, 2], [3, 4], [5, 6]] + ct = ColumnTransformer([("a", DoubleTrans(), [0]), ("b", DoubleTrans(), [1])]) + assert not hasattr(ct, "n_features_in_") + ct.fit(X) + assert ct.n_features_in_ == 2 + + +@pytest.mark.parametrize( + "cols, pattern, include, exclude", + [ + (["col_int", "col_float"], None, np.number, None), + (["col_int", "col_float"], None, None, object), + (["col_int", "col_float"], None, [int, float], None), + (["col_str"], None, [object], None), + (["col_str"], None, object, None), + (["col_float"], None, float, None), + (["col_float"], "at$", [np.number], None), + (["col_int"], None, [int], None), + (["col_int"], "^col_int", [np.number], None), + (["col_float", "col_str"], "float|str", None, None), + (["col_str"], "^col_s", None, [int]), + ([], "str$", float, None), + (["col_int", "col_float", "col_str"], None, [np.number, object], None), + ], +) +def test_make_column_selector_with_select_dtypes(cols, pattern, include, exclude): + pd = pytest.importorskip("pandas") + + X_df = pd.DataFrame( + { + "col_int": np.array([0, 1, 2], dtype=int), + "col_float": np.array([0.0, 1.0, 2.0], dtype=float), + "col_str": ["one", "two", "three"], + }, + columns=["col_int", "col_float", "col_str"], + ) + + selector = make_column_selector( + dtype_include=include, dtype_exclude=exclude, pattern=pattern + ) + + assert_array_equal(selector(X_df), cols) + + +def test_column_transformer_with_make_column_selector(): + # 
Functional test for column transformer + column selector + pd = pytest.importorskip("pandas") + X_df = pd.DataFrame( + { + "col_int": np.array([0, 1, 2], dtype=int), + "col_float": np.array([0.0, 1.0, 2.0], dtype=float), + "col_cat": ["one", "two", "one"], + "col_str": ["low", "middle", "high"], + }, + columns=["col_int", "col_float", "col_cat", "col_str"], + ) + X_df["col_str"] = X_df["col_str"].astype("category") + + cat_selector = make_column_selector(dtype_include=["category", object]) + num_selector = make_column_selector(dtype_include=np.number) + + ohe = OneHotEncoder() + scaler = StandardScaler() + + ct_selector = make_column_transformer((ohe, cat_selector), (scaler, num_selector)) + ct_direct = make_column_transformer( + (ohe, ["col_cat", "col_str"]), (scaler, ["col_float", "col_int"]) + ) + + X_selector = ct_selector.fit_transform(X_df) + X_direct = ct_direct.fit_transform(X_df) + + assert_allclose(X_selector, X_direct) + + +def test_make_column_selector_error(): + selector = make_column_selector(dtype_include=np.number) + X = np.array([[0.1, 0.2]]) + msg = "make_column_selector can only be applied to pandas dataframes" + with pytest.raises(ValueError, match=msg): + selector(X) + + +def test_make_column_selector_pickle(): + pd = pytest.importorskip("pandas") + + X_df = pd.DataFrame( + { + "col_int": np.array([0, 1, 2], dtype=int), + "col_float": np.array([0.0, 1.0, 2.0], dtype=float), + "col_str": ["one", "two", "three"], + }, + columns=["col_int", "col_float", "col_str"], + ) + + selector = make_column_selector(dtype_include=[object]) + selector_picked = pickle.loads(pickle.dumps(selector)) + + assert_array_equal(selector(X_df), selector_picked(X_df)) + + +@pytest.mark.parametrize( + "empty_col", + [[], np.array([], dtype=int), lambda x: []], + ids=["list", "array", "callable"], +) +def test_feature_names_empty_columns(empty_col): + pd = pytest.importorskip("pandas") + + df = pd.DataFrame({"col1": ["a", "a", "b"], "col2": ["z", "z", "z"]}) + + ct = 
ColumnTransformer( + transformers=[ + ("ohe", OneHotEncoder(), ["col1", "col2"]), + ("empty_features", OneHotEncoder(), empty_col), + ], + ) + + ct.fit(df) + assert_array_equal( + ct.get_feature_names_out(), ["ohe__col1_a", "ohe__col1_b", "ohe__col2_z"] + ) + + +@pytest.mark.parametrize( + "selector", + [ + [1], + lambda x: [1], + ["col2"], + lambda x: ["col2"], + [False, True], + lambda x: [False, True], + ], +) +def test_feature_names_out_pandas(selector): + """Checks name when selecting only the second column""" + pd = pytest.importorskip("pandas") + df = pd.DataFrame({"col1": ["a", "a", "b"], "col2": ["z", "z", "z"]}) + ct = ColumnTransformer([("ohe", OneHotEncoder(), selector)]) + ct.fit(df) + + assert_array_equal(ct.get_feature_names_out(), ["ohe__col2_z"]) + + +@pytest.mark.parametrize( + "selector", [[1], lambda x: [1], [False, True], lambda x: [False, True]] +) +def test_feature_names_out_non_pandas(selector): + """Checks name when selecting the second column with numpy array""" + X = [["a", "z"], ["a", "z"], ["b", "z"]] + ct = ColumnTransformer([("ohe", OneHotEncoder(), selector)]) + ct.fit(X) + + assert_array_equal(ct.get_feature_names_out(), ["ohe__x1_z"]) + + +@pytest.mark.parametrize("remainder", ["passthrough", StandardScaler()]) +def test_sk_visual_block_remainder(remainder): + # remainder='passthrough' or an estimator will be shown in repr_html + ohe = OneHotEncoder() + ct = ColumnTransformer( + transformers=[("ohe", ohe, ["col1", "col2"])], remainder=remainder + ) + visual_block = ct._sk_visual_block_() + assert visual_block.names == ("ohe", "remainder") + assert visual_block.name_details == (["col1", "col2"], "") + assert visual_block.estimators == (ohe, remainder) + + +def test_sk_visual_block_remainder_drop(): + # remainder='drop' is not shown in repr_html + ohe = OneHotEncoder() + ct = ColumnTransformer(transformers=[("ohe", ohe, ["col1", "col2"])]) + visual_block = ct._sk_visual_block_() + assert visual_block.names == ("ohe",) + assert 
visual_block.name_details == (["col1", "col2"],) + assert visual_block.estimators == (ohe,) + + +@pytest.mark.parametrize("remainder", ["passthrough", StandardScaler()]) +def test_sk_visual_block_remainder_fitted_pandas(remainder): + # Remainder shows the columns after fitting + pd = pytest.importorskip("pandas") + ohe = OneHotEncoder() + ct = ColumnTransformer( + transformers=[("ohe", ohe, ["col1", "col2"])], remainder=remainder + ) + df = pd.DataFrame( + { + "col1": ["a", "b", "c"], + "col2": ["z", "z", "z"], + "col3": [1, 2, 3], + "col4": [3, 4, 5], + } + ) + ct.fit(df) + visual_block = ct._sk_visual_block_() + assert visual_block.names == ("ohe", "remainder") + assert visual_block.name_details == (["col1", "col2"], ["col3", "col4"]) + assert visual_block.estimators == (ohe, remainder) + + +@pytest.mark.parametrize("remainder", ["passthrough", StandardScaler()]) +def test_sk_visual_block_remainder_fitted_numpy(remainder): + # Remainder shows the indices after fitting + X = np.array([[1, 2, 3], [4, 5, 6]], dtype=float) + scaler = StandardScaler() + ct = ColumnTransformer( + transformers=[("scale", scaler, [0, 2])], remainder=remainder + ) + ct.fit(X) + visual_block = ct._sk_visual_block_() + assert visual_block.names == ("scale", "remainder") + assert visual_block.name_details == ([0, 2], [1]) + assert visual_block.estimators == (scaler, remainder) + + +@pytest.mark.parametrize("explicit_colname", ["first", "second", 0, 1]) +@pytest.mark.parametrize("remainder", [Trans(), "passthrough", "drop"]) +def test_column_transformer_reordered_column_names_remainder( + explicit_colname, remainder +): + """Test the interaction between remainder and column transformer""" + pd = pytest.importorskip("pandas") + + X_fit_array = np.array([[0, 1, 2], [2, 4, 6]]).T + X_fit_df = pd.DataFrame(X_fit_array, columns=["first", "second"]) + + X_trans_array = np.array([[2, 4, 6], [0, 1, 2]]).T + X_trans_df = pd.DataFrame(X_trans_array, columns=["second", "first"]) + + tf = 
ColumnTransformer([("bycol", Trans(), explicit_colname)], remainder=remainder) + + tf.fit(X_fit_df) + X_fit_trans = tf.transform(X_fit_df) + + # Changing the order still works + X_trans = tf.transform(X_trans_df) + assert_allclose(X_trans, X_fit_trans) + + # extra columns are ignored + X_extended_df = X_fit_df.copy() + X_extended_df["third"] = [3, 6, 9] + X_trans = tf.transform(X_extended_df) + assert_allclose(X_trans, X_fit_trans) + + if isinstance(explicit_colname, str): + # Raise error if columns are specified by names but input only allows + # to specify by position, e.g. numpy array instead of a pandas df. + X_array = X_fit_array.copy() + err_msg = "Specifying the columns" + with pytest.raises(ValueError, match=err_msg): + tf.transform(X_array) + + +def test_feature_name_validation_missing_columns_drop_passthough(): + """Test the interaction between {'drop', 'passthrough'} and + missing column names.""" + pd = pytest.importorskip("pandas") + + X = np.ones(shape=(3, 4)) + df = pd.DataFrame(X, columns=["a", "b", "c", "d"]) + + df_dropped = df.drop("c", axis=1) + + # with remainder='passthrough', all columns seen during `fit` must be + # present + tf = ColumnTransformer([("bycol", Trans(), [1])], remainder="passthrough") + tf.fit(df) + msg = r"columns are missing: {'c'}" + with pytest.raises(ValueError, match=msg): + tf.transform(df_dropped) + + # with remainder='drop', it is allowed to have column 'c' missing + tf = ColumnTransformer([("bycol", Trans(), [1])], remainder="drop") + tf.fit(df) + + df_dropped_trans = tf.transform(df_dropped) + df_fit_trans = tf.transform(df) + assert_allclose(df_dropped_trans, df_fit_trans) + + # bycol drops 'c', thus it is allowed for 'c' to be missing + tf = ColumnTransformer([("bycol", "drop", ["c"])], remainder="passthrough") + tf.fit(df) + df_dropped_trans = tf.transform(df_dropped) + df_fit_trans = tf.transform(df) + assert_allclose(df_dropped_trans, df_fit_trans) + + +def test_feature_names_in_(): + """Feature names are 
stored in column transformer. + + Column transformer deliberately does not check for column name consistency. + It only checks that the non-dropped names seen in `fit` are seen + in `transform`. This behavior is already tested in + `test_feature_name_validation_missing_columns_drop_passthough`""" + + pd = pytest.importorskip("pandas") + + feature_names = ["a", "c", "d"] + df = pd.DataFrame([[1, 2, 3]], columns=feature_names) + ct = ColumnTransformer([("bycol", Trans(), ["a", "d"])], remainder="passthrough") + + ct.fit(df) + assert_array_equal(ct.feature_names_in_, feature_names) + assert isinstance(ct.feature_names_in_, np.ndarray) + assert ct.feature_names_in_.dtype == object + + +class TransWithNames(Trans): + def __init__(self, feature_names_out=None): + self.feature_names_out = feature_names_out + + def get_feature_names_out(self, input_features=None): + if self.feature_names_out is not None: + return np.asarray(self.feature_names_out, dtype=object) + return input_features + + +@pytest.mark.parametrize( + "transformers, remainder, expected_names", + [ + ( + [ + ("bycol1", TransWithNames(), ["d", "c"]), + ("bycol2", "passthrough", ["d"]), + ], + "passthrough", + ["bycol1__d", "bycol1__c", "bycol2__d", "remainder__a", "remainder__b"], + ), + ( + [ + ("bycol1", TransWithNames(), ["d", "c"]), + ("bycol2", "passthrough", ["d"]), + ], + "drop", + ["bycol1__d", "bycol1__c", "bycol2__d"], + ), + ( + [ + ("bycol1", TransWithNames(), ["b"]), + ("bycol2", "drop", ["d"]), + ], + "passthrough", + ["bycol1__b", "remainder__a", "remainder__c"], + ), + ( + [ + ("bycol1", TransWithNames(["pca1", "pca2"]), ["a", "b", "d"]), + ], + "passthrough", + ["bycol1__pca1", "bycol1__pca2", "remainder__c"], + ), + ( + [ + ("bycol1", TransWithNames(["a", "b"]), ["d"]), + ("bycol2", "passthrough", ["b"]), + ], + "drop", + ["bycol1__a", "bycol1__b", "bycol2__b"], + ), + ( + [ + ("bycol1", TransWithNames([f"pca{i}" for i in range(2)]), ["b"]), + ("bycol2", TransWithNames([f"pca{i}" for i in 
range(2)]), ["b"]), + ], + "passthrough", + [ + "bycol1__pca0", + "bycol1__pca1", + "bycol2__pca0", + "bycol2__pca1", + "remainder__a", + "remainder__c", + "remainder__d", + ], + ), + ( + [ + ("bycol1", "drop", ["d"]), + ], + "drop", + [], + ), + ( + [ + ("bycol1", TransWithNames(), slice(1, 3)), + ], + "drop", + ["bycol1__b", "bycol1__c"], + ), + ( + [ + ("bycol1", TransWithNames(), ["b"]), + ("bycol2", "drop", slice(3, 4)), + ], + "passthrough", + ["bycol1__b", "remainder__a", "remainder__c"], + ), + ( + [ + ("bycol1", TransWithNames(), ["d", "c"]), + ("bycol2", "passthrough", slice(3, 4)), + ], + "passthrough", + ["bycol1__d", "bycol1__c", "bycol2__d", "remainder__a", "remainder__b"], + ), + ( + [ + ("bycol1", TransWithNames(), slice("b", "c")), + ], + "drop", + ["bycol1__b", "bycol1__c"], + ), + ( + [ + ("bycol1", TransWithNames(), ["b"]), + ("bycol2", "drop", slice("c", "d")), + ], + "passthrough", + ["bycol1__b", "remainder__a"], + ), + ( + [ + ("bycol1", TransWithNames(), ["d", "c"]), + ("bycol2", "passthrough", slice("c", "d")), + ], + "passthrough", + [ + "bycol1__d", + "bycol1__c", + "bycol2__c", + "bycol2__d", + "remainder__a", + "remainder__b", + ], + ), + ], +) +def test_verbose_feature_names_out_true(transformers, remainder, expected_names): + """Check feature_names_out for verbose_feature_names_out=True (default)""" + pd = pytest.importorskip("pandas") + df = pd.DataFrame([[1, 2, 3, 4]], columns=["a", "b", "c", "d"]) + ct = ColumnTransformer( + transformers, + remainder=remainder, + ) + ct.fit(df) + + names = ct.get_feature_names_out() + assert isinstance(names, np.ndarray) + assert names.dtype == object + assert_array_equal(names, expected_names) + + +@pytest.mark.parametrize( + "transformers, remainder, expected_names", + [ + ( + [ + ("bycol1", TransWithNames(), ["d", "c"]), + ("bycol2", "passthrough", ["a"]), + ], + "passthrough", + ["d", "c", "a", "b"], + ), + ( + [ + ("bycol1", TransWithNames(["a"]), ["d", "c"]), + ("bycol2", "passthrough", 
["d"]), + ], + "drop", + ["a", "d"], + ), + ( + [ + ("bycol1", TransWithNames(), ["b"]), + ("bycol2", "drop", ["d"]), + ], + "passthrough", + ["b", "a", "c"], + ), + ( + [ + ("bycol1", TransWithNames(["pca1", "pca2"]), ["a", "b", "d"]), + ], + "passthrough", + ["pca1", "pca2", "c"], + ), + ( + [ + ("bycol1", TransWithNames(["a", "c"]), ["d"]), + ("bycol2", "passthrough", ["d"]), + ], + "drop", + ["a", "c", "d"], + ), + ( + [ + ("bycol1", TransWithNames([f"pca{i}" for i in range(2)]), ["b"]), + ("bycol2", TransWithNames([f"kpca{i}" for i in range(2)]), ["b"]), + ], + "passthrough", + ["pca0", "pca1", "kpca0", "kpca1", "a", "c", "d"], + ), + ( + [ + ("bycol1", "drop", ["d"]), + ], + "drop", + [], + ), + ( + [ + ("bycol1", TransWithNames(), slice(1, 2)), + ("bycol2", "drop", ["d"]), + ], + "passthrough", + ["b", "a", "c"], + ), + ( + [ + ("bycol1", TransWithNames(), ["b"]), + ("bycol2", "drop", slice(3, 4)), + ], + "passthrough", + ["b", "a", "c"], + ), + ( + [ + ("bycol1", TransWithNames(), ["d", "c"]), + ("bycol2", "passthrough", slice(0, 2)), + ], + "drop", + ["d", "c", "a", "b"], + ), + ( + [ + ("bycol1", TransWithNames(), slice("a", "b")), + ("bycol2", "drop", ["d"]), + ], + "passthrough", + ["a", "b", "c"], + ), + ( + [ + ("bycol1", TransWithNames(), ["b"]), + ("bycol2", "drop", slice("c", "d")), + ], + "passthrough", + ["b", "a"], + ), + ( + [ + ("bycol1", TransWithNames(), ["d", "c"]), + ("bycol2", "passthrough", slice("a", "b")), + ], + "drop", + ["d", "c", "a", "b"], + ), + ( + [ + ("bycol1", TransWithNames(), ["d", "c"]), + ("bycol2", "passthrough", slice("b", "b")), + ], + "drop", + ["d", "c", "b"], + ), + ], +) +def test_verbose_feature_names_out_false(transformers, remainder, expected_names): + """Check feature_names_out for verbose_feature_names_out=False""" + pd = pytest.importorskip("pandas") + df = pd.DataFrame([[1, 2, 3, 4]], columns=["a", "b", "c", "d"]) + ct = ColumnTransformer( + transformers, + remainder=remainder, + 
verbose_feature_names_out=False, + ) + ct.fit(df) + + names = ct.get_feature_names_out() + assert isinstance(names, np.ndarray) + assert names.dtype == object + assert_array_equal(names, expected_names) + + +@pytest.mark.parametrize( + "transformers, remainder, colliding_columns", + [ + ( + [ + ("bycol1", TransWithNames(), ["b"]), + ("bycol2", "passthrough", ["b"]), + ], + "drop", + "['b']", + ), + ( + [ + ("bycol1", TransWithNames(["c", "d"]), ["c"]), + ("bycol2", "passthrough", ["c"]), + ], + "drop", + "['c']", + ), + ( + [ + ("bycol1", TransWithNames(["a"]), ["b"]), + ("bycol2", "passthrough", ["b"]), + ], + "passthrough", + "['a']", + ), + ( + [ + ("bycol1", TransWithNames(["a"]), ["b"]), + ("bycol2", "drop", ["b"]), + ], + "passthrough", + "['a']", + ), + ( + [ + ("bycol1", TransWithNames(["c", "b"]), ["b"]), + ("bycol2", "passthrough", ["c", "b"]), + ], + "drop", + "['b', 'c']", + ), + ( + [ + ("bycol1", TransWithNames(["a"]), ["b"]), + ("bycol2", "passthrough", ["a"]), + ("bycol3", TransWithNames(["a"]), ["b"]), + ], + "passthrough", + "['a']", + ), + ( + [ + ("bycol1", TransWithNames(["a", "b"]), ["b"]), + ("bycol2", "passthrough", ["a"]), + ("bycol3", TransWithNames(["b"]), ["c"]), + ], + "passthrough", + "['a', 'b']", + ), + ( + [ + ("bycol1", TransWithNames([f"pca{i}" for i in range(6)]), ["b"]), + ("bycol2", TransWithNames([f"pca{i}" for i in range(6)]), ["b"]), + ], + "passthrough", + "['pca0', 'pca1', 'pca2', 'pca3', 'pca4', ...]", + ), + ( + [ + ("bycol1", TransWithNames(["a", "b"]), slice(1, 2)), + ("bycol2", "passthrough", ["a"]), + ("bycol3", TransWithNames(["b"]), ["c"]), + ], + "passthrough", + "['a', 'b']", + ), + ( + [ + ("bycol1", TransWithNames(["a", "b"]), ["b"]), + ("bycol2", "passthrough", slice(0, 1)), + ("bycol3", TransWithNames(["b"]), ["c"]), + ], + "passthrough", + "['a', 'b']", + ), + ( + [ + ("bycol1", TransWithNames(["a", "b"]), slice("b", "c")), + ("bycol2", "passthrough", ["a"]), + ("bycol3", TransWithNames(["b"]), ["c"]), + ], 
+ "passthrough", + "['a', 'b']", + ), + ( + [ + ("bycol1", TransWithNames(["a", "b"]), ["b"]), + ("bycol2", "passthrough", slice("a", "a")), + ("bycol3", TransWithNames(["b"]), ["c"]), + ], + "passthrough", + "['a', 'b']", + ), + ], +) +def test_verbose_feature_names_out_false_errors( + transformers, remainder, colliding_columns +): + """Check feature_names_out for verbose_feature_names_out=False""" + + pd = pytest.importorskip("pandas") + df = pd.DataFrame([[1, 2, 3, 4]], columns=["a", "b", "c", "d"]) + ct = ColumnTransformer( + transformers, + remainder=remainder, + verbose_feature_names_out=False, + ) + ct.fit(df) + + msg = re.escape( + f"Output feature names: {colliding_columns} are not unique. Please set " + "verbose_feature_names_out=True to add prefixes to feature names" + ) + with pytest.raises(ValueError, match=msg): + ct.get_feature_names_out() + + +@pytest.mark.parametrize("verbose_feature_names_out", [True, False]) +@pytest.mark.parametrize("remainder", ["drop", "passthrough"]) +def test_column_transformer_set_output(verbose_feature_names_out, remainder): + """Check column transformer behavior with set_output.""" + pd = pytest.importorskip("pandas") + df = pd.DataFrame([[1, 2, 3, 4]], columns=["a", "b", "c", "d"], index=[10]) + ct = ColumnTransformer( + [("first", TransWithNames(), ["a", "c"]), ("second", TransWithNames(), ["d"])], + remainder=remainder, + verbose_feature_names_out=verbose_feature_names_out, + ) + X_trans = ct.fit_transform(df) + assert isinstance(X_trans, np.ndarray) + + ct.set_output(transform="pandas") + + df_test = pd.DataFrame([[1, 2, 3, 4]], columns=df.columns, index=[20]) + X_trans = ct.transform(df_test) + assert isinstance(X_trans, pd.DataFrame) + + feature_names_out = ct.get_feature_names_out() + assert_array_equal(X_trans.columns, feature_names_out) + assert_array_equal(X_trans.index, df_test.index) + + +@pytest.mark.parametrize("remainder", ["drop", "passthrough"]) +@pytest.mark.parametrize("fit_transform", [True, False]) 
+def test_column_transform_set_output_mixed(remainder, fit_transform): + """Check ColumnTransformer outputs mixed types correctly.""" + pd = pytest.importorskip("pandas") + df = pd.DataFrame( + { + "pet": pd.Series(["dog", "cat", "snake"], dtype="category"), + "color": pd.Series(["green", "blue", "red"], dtype="object"), + "age": [1.4, 2.1, 4.4], + "height": [20, 40, 10], + "distance": pd.Series([20, pd.NA, 100], dtype="Int32"), + } + ) + ct = ColumnTransformer( + [ + ( + "color_encode", + OneHotEncoder(sparse_output=False, dtype="int8"), + ["color"], + ), + ("age", StandardScaler(), ["age"]), + ], + remainder=remainder, + verbose_feature_names_out=False, + ).set_output(transform="pandas") + if fit_transform: + X_trans = ct.fit_transform(df) + else: + X_trans = ct.fit(df).transform(df) + + assert isinstance(X_trans, pd.DataFrame) + assert_array_equal(X_trans.columns, ct.get_feature_names_out()) + + expected_dtypes = { + "color_blue": "int8", + "color_green": "int8", + "color_red": "int8", + "age": "float64", + "pet": "category", + "height": "int64", + "distance": "Int32", + } + for col, dtype in X_trans.dtypes.items(): + assert dtype == expected_dtypes[col] + + +@pytest.mark.parametrize("remainder", ["drop", "passthrough"]) +def test_column_transform_set_output_after_fitting(remainder): + pd = pytest.importorskip("pandas") + df = pd.DataFrame( + { + "pet": pd.Series(["dog", "cat", "snake"], dtype="category"), + "age": [1.4, 2.1, 4.4], + "height": [20, 40, 10], + } + ) + ct = ColumnTransformer( + [ + ( + "color_encode", + OneHotEncoder(sparse_output=False, dtype="int16"), + ["pet"], + ), + ("age", StandardScaler(), ["age"]), + ], + remainder=remainder, + verbose_feature_names_out=False, + ) + + # fit without calling set_output + X_trans = ct.fit_transform(df) + assert isinstance(X_trans, np.ndarray) + assert X_trans.dtype == "float64" + + ct.set_output(transform="pandas") + X_trans_df = ct.transform(df) + expected_dtypes = { + "pet_cat": "int16", + "pet_dog": 
"int16", + "pet_snake": "int16", + "height": "int64", + "age": "float64", + } + for col, dtype in X_trans_df.dtypes.items(): + assert dtype == expected_dtypes[col] + + +# PandasOutTransformer that does not define get_feature_names_out and always expects +# the input to be a DataFrame. +class PandasOutTransformer(BaseEstimator): + def __init__(self, offset=1.0): + self.offset = offset + + def fit(self, X, y=None): + pd = pytest.importorskip("pandas") + assert isinstance(X, pd.DataFrame) + return self + + def transform(self, X, y=None): + pd = pytest.importorskip("pandas") + assert isinstance(X, pd.DataFrame) + return X - self.offset + + def set_output(self, transform=None): + # This transformer will always output a DataFrame regardless of the + # configuration. + return self + + +@pytest.mark.parametrize( + "trans_1, expected_verbose_names, expected_non_verbose_names", + [ + ( + PandasOutTransformer(offset=2.0), + ["trans_0__feat1", "trans_1__feat0"], + ["feat1", "feat0"], + ), + ( + "drop", + ["trans_0__feat1"], + ["feat1"], + ), + ( + "passthrough", + ["trans_0__feat1", "trans_1__feat0"], + ["feat1", "feat0"], + ), + ], +) +def test_transformers_with_pandas_out_but_not_feature_names_out( + trans_1, expected_verbose_names, expected_non_verbose_names +): + """Check that set_config(transform="pandas") is compatible with more transformers. + + Specifically, if transformers returns a DataFrame, but does not define + `get_feature_names_out`. + """ + pd = pytest.importorskip("pandas") + + X_df = pd.DataFrame({"feat0": [1.0, 2.0, 3.0], "feat1": [2.0, 3.0, 4.0]}) + ct = ColumnTransformer( + [ + ("trans_0", PandasOutTransformer(offset=3.0), ["feat1"]), + ("trans_1", trans_1, ["feat0"]), + ] + ) + X_trans_np = ct.fit_transform(X_df) + assert isinstance(X_trans_np, np.ndarray) + + # `ct` does not have `get_feature_names_out` because `PandasOutTransformer` does + # not define the method. 
+ with pytest.raises(AttributeError, match="not provide get_feature_names_out"): + ct.get_feature_names_out() + + # The feature names are prefixed because verbose_feature_names_out=True is default + ct.set_output(transform="pandas") + X_trans_df0 = ct.fit_transform(X_df) + assert_array_equal(X_trans_df0.columns, expected_verbose_names) + + ct.set_params(verbose_feature_names_out=False) + X_trans_df1 = ct.fit_transform(X_df) + assert_array_equal(X_trans_df1.columns, expected_non_verbose_names) + + +@pytest.mark.parametrize( + "empty_selection", + [[], np.array([False, False]), [False, False]], + ids=["list", "bool", "bool_int"], +) +def test_empty_selection_pandas_output(empty_selection): + """Check that pandas output works when there is an empty selection. + + Non-regression test for gh-25487 + """ + pd = pytest.importorskip("pandas") + + X = pd.DataFrame([[1.0, 2.2], [3.0, 1.0]], columns=["a", "b"]) + ct = ColumnTransformer( + [ + ("categorical", "passthrough", empty_selection), + ("numerical", StandardScaler(), ["a", "b"]), + ], + verbose_feature_names_out=True, + ) + ct.set_output(transform="pandas") + X_out = ct.fit_transform(X) + assert_array_equal(X_out.columns, ["numerical__a", "numerical__b"]) + + ct.set_params(verbose_feature_names_out=False) + X_out = ct.fit_transform(X) + assert_array_equal(X_out.columns, ["a", "b"]) + + +def test_raise_error_if_index_not_aligned(): + """Check column transformer raises error if indices are not aligned. + + Non-regression test for gh-26210. 
+ """ + pd = pytest.importorskip("pandas") + + X = pd.DataFrame([[1.0, 2.2], [3.0, 1.0]], columns=["a", "b"], index=[8, 3]) + reset_index_transformer = FunctionTransformer( + lambda x: x.reset_index(drop=True), feature_names_out="one-to-one" + ) + + ct = ColumnTransformer( + [ + ("num1", "passthrough", ["a"]), + ("num2", reset_index_transformer, ["b"]), + ], + ) + ct.set_output(transform="pandas") + msg = ( + "Concatenating DataFrames from the transformer's output lead to" + " an inconsistent number of samples. The output may have Pandas" + " Indexes that do not match." + ) + with pytest.raises(ValueError, match=msg): + ct.fit_transform(X) + + +def test_remainder_set_output(): + """Check that the output is set for the remainder. + + Non-regression test for #26306. + """ + + pd = pytest.importorskip("pandas") + df = pd.DataFrame({"a": [True, False, True], "b": [1, 2, 3]}) + + ct = make_column_transformer( + (VarianceThreshold(), make_column_selector(dtype_include=bool)), + remainder=VarianceThreshold(), + verbose_feature_names_out=False, + ) + ct.set_output(transform="pandas") + + out = ct.fit_transform(df) + pd.testing.assert_frame_equal(out, df) + + ct.set_output(transform="default") + out = ct.fit_transform(df) + assert isinstance(out, np.ndarray) + + +# TODO(1.6): replace the warning by a ValueError exception +def test_transform_pd_na(): + """Check behavior when a tranformer's output contains pandas.NA + + It should emit a warning unless the output config is set to 'pandas'. 
+ """ + pd = pytest.importorskip("pandas") + if not hasattr(pd, "Float64Dtype"): + pytest.skip( + "The issue with pd.NA tested here does not happen in old versions that do" + " not have the extension dtypes" + ) + df = pd.DataFrame({"a": [1.5, None]}) + ct = make_column_transformer(("passthrough", ["a"])) + # No warning with non-extension dtypes and np.nan + with warnings.catch_warnings(): + warnings.simplefilter("error") + ct.fit_transform(df) + df = df.convert_dtypes() + # Error with extension dtype and pd.NA + with pytest.warns(FutureWarning, match=r"set_output\(transform='pandas'\)"): + ct.fit_transform(df) + # No warning when output is set to pandas + with warnings.catch_warnings(): + warnings.simplefilter("error") + ct.set_output(transform="pandas") + ct.fit_transform(df) + ct.set_output(transform="default") + # No warning when there are no pd.NA + with warnings.catch_warnings(): + warnings.simplefilter("error") + ct.fit_transform(df.fillna(-1.0)) + + +def test_dataframe_different_dataframe_libraries(): + """Check fitting and transforming on pandas and polars dataframes.""" + pd = pytest.importorskip("pandas") + pl = pytest.importorskip("polars") + X_train_np = np.array([[0, 1], [2, 4], [4, 5]]) + X_test_np = np.array([[1, 2], [1, 3], [2, 3]]) + + # Fit on pandas and transform on polars + X_train_pd = pd.DataFrame(X_train_np, columns=["a", "b"]) + X_test_pl = pl.DataFrame(X_test_np, schema=["a", "b"]) + + ct = make_column_transformer((Trans(), [0, 1])) + ct.fit(X_train_pd) + + out_pl_in = ct.transform(X_test_pl) + assert_array_equal(out_pl_in, X_test_np) + + # Fit on polars and transform on pandas + X_train_pl = pl.DataFrame(X_train_np, schema=["a", "b"]) + X_test_pd = pd.DataFrame(X_test_np, columns=["a", "b"]) + ct.fit(X_train_pl) + + out_pd_in = ct.transform(X_test_pd) + assert_array_equal(out_pd_in, X_test_np) + + +@pytest.mark.parametrize("transform_output", ["default", "pandas"]) +def 
test_column_transformer_remainder_passthrough_naming_consistency(transform_output): + """Check that when `remainder="passthrough"`, inconsistent naming is handled + correctly by the underlying `FunctionTransformer`. + + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/28232 + """ + pd = pytest.importorskip("pandas") + X = pd.DataFrame(np.random.randn(10, 4)) + + preprocessor = ColumnTransformer( + transformers=[("scaler", StandardScaler(), [0, 1])], + remainder="passthrough", + ).set_output(transform=transform_output) + X_trans = preprocessor.fit_transform(X) + assert X_trans.shape == X.shape + + expected_column_names = [ + "scaler__x0", + "scaler__x1", + "remainder__x2", + "remainder__x3", + ] + if hasattr(X_trans, "columns"): + assert X_trans.columns.tolist() == expected_column_names + assert preprocessor.get_feature_names_out().tolist() == expected_column_names + + +@pytest.mark.parametrize("dataframe_lib", ["pandas", "polars"]) +def test_column_transformer_column_renaming(dataframe_lib): + """Check that we properly rename columns when using `ColumnTransformer` and + selected columns are redundant between transformers. 
+ + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/28260 + """ + lib = pytest.importorskip(dataframe_lib) + + df = lib.DataFrame({"x1": [1, 2, 3], "x2": [10, 20, 30], "x3": [100, 200, 300]}) + + transformer = ColumnTransformer( + transformers=[ + ("A", "passthrough", ["x1", "x2", "x3"]), + ("B", FunctionTransformer(), ["x1", "x2"]), + ("C", StandardScaler(), ["x1", "x3"]), + # special case of empty transformer + ("D", FunctionTransformer(lambda x: x[[]]), ["x1", "x2", "x3"]), + ], + verbose_feature_names_out=True, + ).set_output(transform=dataframe_lib) + df_trans = transformer.fit_transform(df) + assert list(df_trans.columns) == [ + "A__x1", + "A__x2", + "A__x3", + "B__x1", + "B__x2", + "C__x1", + "C__x3", + ] + + +@pytest.mark.parametrize("dataframe_lib", ["pandas", "polars"]) +def test_column_transformer_error_with_duplicated_columns(dataframe_lib): + """Check that we raise an error when using `ColumnTransformer` and + the columns names are duplicated between transformers.""" + lib = pytest.importorskip(dataframe_lib) + + df = lib.DataFrame({"x1": [1, 2, 3], "x2": [10, 20, 30], "x3": [100, 200, 300]}) + + transformer = ColumnTransformer( + transformers=[ + ("A", "passthrough", ["x1", "x2", "x3"]), + ("B", FunctionTransformer(), ["x1", "x2"]), + ("C", StandardScaler(), ["x1", "x3"]), + # special case of empty transformer + ("D", FunctionTransformer(lambda x: x[[]]), ["x1", "x2", "x3"]), + ], + verbose_feature_names_out=False, + ).set_output(transform=dataframe_lib) + err_msg = re.escape( + "Duplicated feature names found before concatenating the outputs of the " + "transformers: ['x1', 'x2', 'x3'].\n" + "Transformer A has conflicting columns names: ['x1', 'x2', 'x3'].\n" + "Transformer B has conflicting columns names: ['x1', 'x2'].\n" + "Transformer C has conflicting columns names: ['x1', 'x3'].\n" + ) + with pytest.raises(ValueError, match=err_msg): + transformer.fit_transform(df) + + +# Metadata Routing Tests +# 
====================== + + +@pytest.mark.parametrize("method", ["transform", "fit_transform", "fit"]) +def test_routing_passed_metadata_not_supported(method): + """Test that the right error message is raised when metadata is passed while + not supported when `enable_metadata_routing=False`.""" + + X = np.array([[0, 1, 2], [2, 4, 6]]).T + y = [1, 2, 3] + trs = ColumnTransformer([("trans", Trans(), [0])]).fit(X, y) + + with pytest.raises( + ValueError, match="is only supported if enable_metadata_routing=True" + ): + getattr(trs, method)([[1]], sample_weight=[1], prop="a") + + +@pytest.mark.usefixtures("enable_slep006") +@pytest.mark.parametrize("method", ["transform", "fit_transform", "fit"]) +def test_metadata_routing_for_column_transformer(method): + """Test that metadata is routed correctly for column transformer.""" + X = np.array([[0, 1, 2], [2, 4, 6]]).T + y = [1, 2, 3] + registry = _Registry() + sample_weight, metadata = [1], "a" + trs = ColumnTransformer( + [ + ( + "trans", + ConsumingTransformer(registry=registry) + .set_fit_request(sample_weight=True, metadata=True) + .set_transform_request(sample_weight=True, metadata=True), + [0], + ) + ] + ) + + if method == "transform": + trs.fit(X, y) + trs.transform(X, sample_weight=sample_weight, metadata=metadata) + else: + getattr(trs, method)(X, y, sample_weight=sample_weight, metadata=metadata) + + assert len(registry) + for _trs in registry: + check_recorded_metadata( + obj=_trs, method=method, sample_weight=sample_weight, metadata=metadata + ) + + +@pytest.mark.usefixtures("enable_slep006") +def test_metadata_routing_no_fit_transform(): + """Test metadata routing when the sub-estimator doesn't implement + ``fit_transform``.""" + + class NoFitTransform(BaseEstimator): + def fit(self, X, y=None, sample_weight=None, metadata=None): + assert sample_weight + assert metadata + return self + + def transform(self, X, sample_weight=None, metadata=None): + assert sample_weight + assert metadata + return X + + X = 
np.array([[0, 1, 2], [2, 4, 6]]).T + y = [1, 2, 3] + _Registry() + sample_weight, metadata = [1], "a" + trs = ColumnTransformer( + [ + ( + "trans", + NoFitTransform() + .set_fit_request(sample_weight=True, metadata=True) + .set_transform_request(sample_weight=True, metadata=True), + [0], + ) + ] + ) + + trs.fit(X, y, sample_weight=sample_weight, metadata=metadata) + trs.fit_transform(X, y, sample_weight=sample_weight, metadata=metadata) + + +@pytest.mark.usefixtures("enable_slep006") +@pytest.mark.parametrize("method", ["transform", "fit_transform", "fit"]) +def test_metadata_routing_error_for_column_transformer(method): + """Test that the right error is raised when metadata is not requested.""" + X = np.array([[0, 1, 2], [2, 4, 6]]).T + y = [1, 2, 3] + sample_weight, metadata = [1], "a" + trs = ColumnTransformer([("trans", ConsumingTransformer(), [0])]) + + error_message = ( + "[sample_weight, metadata] are passed but are not explicitly set as requested" + f" or not for ConsumingTransformer.{method}" + ) + with pytest.raises(ValueError, match=re.escape(error_message)): + if method == "transform": + trs.fit(X, y) + trs.transform(X, sample_weight=sample_weight, metadata=metadata) + else: + getattr(trs, method)(X, y, sample_weight=sample_weight, metadata=metadata) + + +@pytest.mark.usefixtures("enable_slep006") +def test_get_metadata_routing_works_without_fit(): + # Regression test for https://github.com/scikit-learn/scikit-learn/issues/28186 + # Make sure ct.get_metadata_routing() works w/o having called fit. + ct = ColumnTransformer([("trans", ConsumingTransformer(), [0])]) + ct.get_metadata_routing() + + +@pytest.mark.usefixtures("enable_slep006") +def test_remainder_request_always_present(): + # Test that remainder request is always present. 
+ ct = ColumnTransformer( + [("trans", StandardScaler(), [0])], + remainder=ConsumingTransformer() + .set_fit_request(metadata=True) + .set_transform_request(metadata=True), + ) + router = ct.get_metadata_routing() + assert router.consumes("fit", ["metadata"]) == set(["metadata"]) + + +@pytest.mark.usefixtures("enable_slep006") +def test_unused_transformer_request_present(): + # Test that the request of a transformer is always present even when not + # used due to no selected columns. + ct = ColumnTransformer( + [ + ( + "trans", + ConsumingTransformer() + .set_fit_request(metadata=True) + .set_transform_request(metadata=True), + lambda X: [], + ) + ] + ) + router = ct.get_metadata_routing() + assert router.consumes("fit", ["metadata"]) == set(["metadata"]) + + +# End of Metadata Routing Tests +# ============================= diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/compose/tests/test_target.py b/env-llmeval/lib/python3.10/site-packages/sklearn/compose/tests/test_target.py new file mode 100644 index 0000000000000000000000000000000000000000..53242b7e0277be30a9ebc1406dd8965e6bbcd96b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/compose/tests/test_target.py @@ -0,0 +1,387 @@ +import numpy as np +import pytest + +from sklearn import datasets +from sklearn.base import BaseEstimator, TransformerMixin, clone +from sklearn.compose import TransformedTargetRegressor +from sklearn.dummy import DummyRegressor +from sklearn.linear_model import LinearRegression, OrthogonalMatchingPursuit +from sklearn.pipeline import Pipeline +from sklearn.preprocessing import FunctionTransformer, StandardScaler +from sklearn.utils._testing import assert_allclose, assert_no_warnings + +friedman = datasets.make_friedman1(random_state=0) + + +def test_transform_target_regressor_error(): + X, y = friedman + # provide a transformer and functions at the same time + regr = TransformedTargetRegressor( + regressor=LinearRegression(), + 
transformer=StandardScaler(), + func=np.exp, + inverse_func=np.log, + ) + with pytest.raises( + ValueError, + match="'transformer' and functions 'func'/'inverse_func' cannot both be set.", + ): + regr.fit(X, y) + # fit with sample_weight with a regressor which does not support it + sample_weight = np.ones((y.shape[0],)) + regr = TransformedTargetRegressor( + regressor=OrthogonalMatchingPursuit(), transformer=StandardScaler() + ) + with pytest.raises( + TypeError, + match=r"fit\(\) got an unexpected " "keyword argument 'sample_weight'", + ): + regr.fit(X, y, sample_weight=sample_weight) + # func is given but inverse_func is not + regr = TransformedTargetRegressor(func=np.exp) + with pytest.raises( + ValueError, + match="When 'func' is provided, 'inverse_func' must also be provided", + ): + regr.fit(X, y) + + +def test_transform_target_regressor_invertible(): + X, y = friedman + regr = TransformedTargetRegressor( + regressor=LinearRegression(), + func=np.sqrt, + inverse_func=np.log, + check_inverse=True, + ) + with pytest.warns( + UserWarning, + match=( + "The provided functions or" + " transformer are not strictly inverse of each other." 
+ ), + ): + regr.fit(X, y) + regr = TransformedTargetRegressor( + regressor=LinearRegression(), func=np.sqrt, inverse_func=np.log + ) + regr.set_params(check_inverse=False) + assert_no_warnings(regr.fit, X, y) + + +def _check_standard_scaled(y, y_pred): + y_mean = np.mean(y, axis=0) + y_std = np.std(y, axis=0) + assert_allclose((y - y_mean) / y_std, y_pred) + + +def _check_shifted_by_one(y, y_pred): + assert_allclose(y + 1, y_pred) + + +def test_transform_target_regressor_functions(): + X, y = friedman + regr = TransformedTargetRegressor( + regressor=LinearRegression(), func=np.log, inverse_func=np.exp + ) + y_pred = regr.fit(X, y).predict(X) + # check the transformer output + y_tran = regr.transformer_.transform(y.reshape(-1, 1)).squeeze() + assert_allclose(np.log(y), y_tran) + assert_allclose( + y, regr.transformer_.inverse_transform(y_tran.reshape(-1, 1)).squeeze() + ) + assert y.shape == y_pred.shape + assert_allclose(y_pred, regr.inverse_func(regr.regressor_.predict(X))) + # check the regressor output + lr = LinearRegression().fit(X, regr.func(y)) + assert_allclose(regr.regressor_.coef_.ravel(), lr.coef_.ravel()) + + +def test_transform_target_regressor_functions_multioutput(): + X = friedman[0] + y = np.vstack((friedman[1], friedman[1] ** 2 + 1)).T + regr = TransformedTargetRegressor( + regressor=LinearRegression(), func=np.log, inverse_func=np.exp + ) + y_pred = regr.fit(X, y).predict(X) + # check the transformer output + y_tran = regr.transformer_.transform(y) + assert_allclose(np.log(y), y_tran) + assert_allclose(y, regr.transformer_.inverse_transform(y_tran)) + assert y.shape == y_pred.shape + assert_allclose(y_pred, regr.inverse_func(regr.regressor_.predict(X))) + # check the regressor output + lr = LinearRegression().fit(X, regr.func(y)) + assert_allclose(regr.regressor_.coef_.ravel(), lr.coef_.ravel()) + + +@pytest.mark.parametrize( + "X,y", [friedman, (friedman[0], np.vstack((friedman[1], friedman[1] ** 2 + 1)).T)] +) +def 
test_transform_target_regressor_1d_transformer(X, y): + # All transformer in scikit-learn expect 2D data. FunctionTransformer with + # validate=False lift this constraint without checking that the input is a + # 2D vector. We check the consistency of the data shape using a 1D and 2D y + # array. + transformer = FunctionTransformer( + func=lambda x: x + 1, inverse_func=lambda x: x - 1 + ) + regr = TransformedTargetRegressor( + regressor=LinearRegression(), transformer=transformer + ) + y_pred = regr.fit(X, y).predict(X) + assert y.shape == y_pred.shape + # consistency forward transform + y_tran = regr.transformer_.transform(y) + _check_shifted_by_one(y, y_tran) + assert y.shape == y_pred.shape + # consistency inverse transform + assert_allclose(y, regr.transformer_.inverse_transform(y_tran).squeeze()) + # consistency of the regressor + lr = LinearRegression() + transformer2 = clone(transformer) + lr.fit(X, transformer2.fit_transform(y)) + y_lr_pred = lr.predict(X) + assert_allclose(y_pred, transformer2.inverse_transform(y_lr_pred)) + assert_allclose(regr.regressor_.coef_, lr.coef_) + + +@pytest.mark.parametrize( + "X,y", [friedman, (friedman[0], np.vstack((friedman[1], friedman[1] ** 2 + 1)).T)] +) +def test_transform_target_regressor_2d_transformer(X, y): + # Check consistency with transformer accepting only 2D array and a 1D/2D y + # array. 
+ transformer = StandardScaler() + regr = TransformedTargetRegressor( + regressor=LinearRegression(), transformer=transformer + ) + y_pred = regr.fit(X, y).predict(X) + assert y.shape == y_pred.shape + # consistency forward transform + if y.ndim == 1: # create a 2D array and squeeze results + y_tran = regr.transformer_.transform(y.reshape(-1, 1)) + else: + y_tran = regr.transformer_.transform(y) + _check_standard_scaled(y, y_tran.squeeze()) + assert y.shape == y_pred.shape + # consistency inverse transform + assert_allclose(y, regr.transformer_.inverse_transform(y_tran).squeeze()) + # consistency of the regressor + lr = LinearRegression() + transformer2 = clone(transformer) + if y.ndim == 1: # create a 2D array and squeeze results + lr.fit(X, transformer2.fit_transform(y.reshape(-1, 1)).squeeze()) + y_lr_pred = lr.predict(X).reshape(-1, 1) + y_pred2 = transformer2.inverse_transform(y_lr_pred).squeeze() + else: + lr.fit(X, transformer2.fit_transform(y)) + y_lr_pred = lr.predict(X) + y_pred2 = transformer2.inverse_transform(y_lr_pred) + + assert_allclose(y_pred, y_pred2) + assert_allclose(regr.regressor_.coef_, lr.coef_) + + +def test_transform_target_regressor_2d_transformer_multioutput(): + # Check consistency with transformer accepting only 2D array and a 2D y + # array. 
+ X = friedman[0] + y = np.vstack((friedman[1], friedman[1] ** 2 + 1)).T + transformer = StandardScaler() + regr = TransformedTargetRegressor( + regressor=LinearRegression(), transformer=transformer + ) + y_pred = regr.fit(X, y).predict(X) + assert y.shape == y_pred.shape + # consistency forward transform + y_tran = regr.transformer_.transform(y) + _check_standard_scaled(y, y_tran) + assert y.shape == y_pred.shape + # consistency inverse transform + assert_allclose(y, regr.transformer_.inverse_transform(y_tran).squeeze()) + # consistency of the regressor + lr = LinearRegression() + transformer2 = clone(transformer) + lr.fit(X, transformer2.fit_transform(y)) + y_lr_pred = lr.predict(X) + assert_allclose(y_pred, transformer2.inverse_transform(y_lr_pred)) + assert_allclose(regr.regressor_.coef_, lr.coef_) + + +def test_transform_target_regressor_3d_target(): + # Non-regression test for: + # https://github.com/scikit-learn/scikit-learn/issues/18866 + # Check with a 3D target with a transformer that reshapes the target + X = friedman[0] + y = np.tile(friedman[1].reshape(-1, 1, 1), [1, 3, 2]) + + def flatten_data(data): + return data.reshape(data.shape[0], -1) + + def unflatten_data(data): + return data.reshape(data.shape[0], -1, 2) + + transformer = FunctionTransformer(func=flatten_data, inverse_func=unflatten_data) + regr = TransformedTargetRegressor( + regressor=LinearRegression(), transformer=transformer + ) + y_pred = regr.fit(X, y).predict(X) + assert y.shape == y_pred.shape + + +def test_transform_target_regressor_multi_to_single(): + X = friedman[0] + y = np.transpose([friedman[1], (friedman[1] ** 2 + 1)]) + + def func(y): + out = np.sqrt(y[:, 0] ** 2 + y[:, 1] ** 2) + return out[:, np.newaxis] + + def inverse_func(y): + return y + + tt = TransformedTargetRegressor( + func=func, inverse_func=inverse_func, check_inverse=False + ) + tt.fit(X, y) + y_pred_2d_func = tt.predict(X) + assert y_pred_2d_func.shape == (100, 1) + + # force that the function only return a 1D 
array + def func(y): + return np.sqrt(y[:, 0] ** 2 + y[:, 1] ** 2) + + tt = TransformedTargetRegressor( + func=func, inverse_func=inverse_func, check_inverse=False + ) + tt.fit(X, y) + y_pred_1d_func = tt.predict(X) + assert y_pred_1d_func.shape == (100, 1) + + assert_allclose(y_pred_1d_func, y_pred_2d_func) + + +class DummyCheckerArrayTransformer(TransformerMixin, BaseEstimator): + def fit(self, X, y=None): + assert isinstance(X, np.ndarray) + return self + + def transform(self, X): + assert isinstance(X, np.ndarray) + return X + + def inverse_transform(self, X): + assert isinstance(X, np.ndarray) + return X + + +class DummyCheckerListRegressor(DummyRegressor): + def fit(self, X, y, sample_weight=None): + assert isinstance(X, list) + return super().fit(X, y, sample_weight) + + def predict(self, X): + assert isinstance(X, list) + return super().predict(X) + + +def test_transform_target_regressor_ensure_y_array(): + # check that the target ``y`` passed to the transformer will always be a + # numpy array. Similarly, if ``X`` is passed as a list, we check that the + # predictor receive as it is. 
+ X, y = friedman + tt = TransformedTargetRegressor( + transformer=DummyCheckerArrayTransformer(), + regressor=DummyCheckerListRegressor(), + check_inverse=False, + ) + tt.fit(X.tolist(), y.tolist()) + tt.predict(X.tolist()) + with pytest.raises(AssertionError): + tt.fit(X, y.tolist()) + with pytest.raises(AssertionError): + tt.predict(X) + + +class DummyTransformer(TransformerMixin, BaseEstimator): + """Dummy transformer which count how many time fit was called.""" + + def __init__(self, fit_counter=0): + self.fit_counter = fit_counter + + def fit(self, X, y=None): + self.fit_counter += 1 + return self + + def transform(self, X): + return X + + def inverse_transform(self, X): + return X + + +@pytest.mark.parametrize("check_inverse", [False, True]) +def test_transform_target_regressor_count_fit(check_inverse): + # regression test for gh-issue #11618 + # check that we only call a single time fit for the transformer + X, y = friedman + ttr = TransformedTargetRegressor( + transformer=DummyTransformer(), check_inverse=check_inverse + ) + ttr.fit(X, y) + assert ttr.transformer_.fit_counter == 1 + + +class DummyRegressorWithExtraFitParams(DummyRegressor): + def fit(self, X, y, sample_weight=None, check_input=True): + # on the test below we force this to false, we make sure this is + # actually passed to the regressor + assert not check_input + return super().fit(X, y, sample_weight) + + +def test_transform_target_regressor_pass_fit_parameters(): + X, y = friedman + regr = TransformedTargetRegressor( + regressor=DummyRegressorWithExtraFitParams(), transformer=DummyTransformer() + ) + + regr.fit(X, y, check_input=False) + assert regr.transformer_.fit_counter == 1 + + +def test_transform_target_regressor_route_pipeline(): + X, y = friedman + + regr = TransformedTargetRegressor( + regressor=DummyRegressorWithExtraFitParams(), transformer=DummyTransformer() + ) + estimators = [("normalize", StandardScaler()), ("est", regr)] + + pip = Pipeline(estimators) + pip.fit(X, y, 
**{"est__check_input": False}) + + assert regr.transformer_.fit_counter == 1 + + +class DummyRegressorWithExtraPredictParams(DummyRegressor): + def predict(self, X, check_input=True): + # In the test below we make sure that the check input parameter is + # passed as false + self.predict_called = True + assert not check_input + return super().predict(X) + + +def test_transform_target_regressor_pass_extra_predict_parameters(): + # Checks that predict kwargs are passed to regressor. + X, y = friedman + regr = TransformedTargetRegressor( + regressor=DummyRegressorWithExtraPredictParams(), transformer=DummyTransformer() + ) + + regr.fit(X, y) + regr.predict(X, check_input=False) + assert regr.regressor_.predict_called diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/data/diabetes_data_raw.csv.gz b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/data/diabetes_data_raw.csv.gz new file mode 100644 index 0000000000000000000000000000000000000000..fc968bc750f5e995ed4092180e7434b2f780b9cf --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/data/diabetes_data_raw.csv.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a3e94cc7cea00f8a84fa5f6345203913a68efa42df18f87ddf9bead721bfd503 +size 7105 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/data/diabetes_target.csv.gz b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/data/diabetes_target.csv.gz new file mode 100644 index 0000000000000000000000000000000000000000..b11a1924f6085214fbedb70b19e689b05750cd11 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/data/diabetes_target.csv.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8e53f65eb811df43c206f3534bb3af0e5fed213bc37ed6ba36310157d6023803 +size 1050 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/data/digits.csv.gz b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/data/digits.csv.gz 
new file mode 100644 index 0000000000000000000000000000000000000000..b655e3ffa0818ef8048d461352aaa58599baa4e0 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/data/digits.csv.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:09f66e6debdee2cd2b5ae59e0d6abbb73fc2b0e0185d2e1957e9ebb51e23aa22 +size 57523 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/images/china.jpg b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/images/china.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9e885acfcf3f5a562290d081e204046372238233 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/images/china.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8378025ad2519d649d02e32bd98990db4ab572357d9f09841c2fbfbb4fefad29 +size 196653 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/images/flower.jpg b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/images/flower.jpg new file mode 100644 index 0000000000000000000000000000000000000000..56350635174c5d062428d0128910faa0476b66ee --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/images/flower.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a77f6ec41e353afdf8bdff2ea981b2955535d8d83294f8cfa49cf4e423dd5638 +size 142987 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1590/api-v1-jd-1590.json.gz b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1590/api-v1-jd-1590.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..93289f3064f0d69614a88c2d71922a7a31a92b4a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1590/api-v1-jd-1590.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:9b105adfedc6b6b82f4695ca9bfe232393034cdf79803523f397a6dc5bf824d1 +size 1544 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1590/api-v1-jdf-1590.json.gz b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1590/api-v1-jdf-1590.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..a273bcdfbb3409d37146d32081c82fbc3e7c6e52 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1590/api-v1-jdf-1590.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:046f5e60564693f0f3b8e382725c8012c3e058647139c24685cec984e40fcd00 +size 1032 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1590/api-v1-jdq-1590.json.gz b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1590/api-v1-jdq-1590.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..d738f893891ff3d747ee71e709301a481d09430e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1590/api-v1-jdq-1590.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:44b9b0d290a1e339695a431438f84080071c5635161c3977dd17f4c27b00a34a +size 1507 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1590/data-v1-dl-1595261.arff.gz b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1590/data-v1-dl-1595261.arff.gz new file mode 100644 index 0000000000000000000000000000000000000000..6619336b53b5d71c6bd5c7f2de8e11b88c33ed31 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1590/data-v1-dl-1595261.arff.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ee1dcdf58f2f1072f7dd1b43388969c51bc6cfe776e3e9465ae6a756e5ddb10a +size 1152 diff --git 
a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_292/api-v1-jd-40981.json.gz b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_292/api-v1-jd-40981.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..8015288dcd2399e2c86a4050ce81ec49902d6baf --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_292/api-v1-jd-40981.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c26dcbe30cfb39161f305b2b3d43a9b50adc8b368d0749568c47106cbdb20897 +size 553 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40589/api-v1-jdl-dn-emotions-l-2-s-act-.json.gz b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40589/api-v1-jdl-dn-emotions-l-2-s-act-.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..f8f940438f61ac6fbeaa00c46741c80579af46eb --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40589/api-v1-jdl-dn-emotions-l-2-s-act-.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4da63a60163340b6e18922abfe7f1f2a7a7da23da63c269324985d61ffaa6075 +size 318 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40945/api-v1-jdf-40945.json.gz b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40945/api-v1-jdf-40945.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..24e0e87d484661242d46a4cf18e2e6695736fa26 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40945/api-v1-jdf-40945.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:95f0938dfdf1b87d0ffc4d526f2c91e097ef7689480b693970126d908f291030 +size 320 diff --git 
a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_42074/api-v1-jd-42074.json.gz b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_42074/api-v1-jd-42074.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..61a0b7bc6260b766ad0c03786a40d306843a53b2 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_42074/api-v1-jd-42074.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f443b3add7375ca92ece9296b8449a0780305d3b5210c84994bdeab36271d62a +size 584 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_42074/api-v1-jdf-42074.json.gz b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_42074/api-v1-jdf-42074.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..89818ff01633a27976f11fd38a70cb1a652dce77 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_42074/api-v1-jdf-42074.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:38b74e7f02a61ff55bcfac4d87103d5bffc43febb0c019d9aaa162f8f7693068 +size 272 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_42074/api-v1-jdq-42074.json.gz b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_42074/api-v1-jdq-42074.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..c152f7e5d9f72441b2fc6aa9f96af8f9ef9fc690 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_42074/api-v1-jdq-42074.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8742a74bd5bc120acd9186c8a8737cb420ed9b009fade00b24e7ce5217797f2c +size 722 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_42074/data-v1-dl-21552912.arff.gz 
b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_42074/data-v1-dl-21552912.arff.gz new file mode 100644 index 0000000000000000000000000000000000000000..010258ddd3f64ab3d63665f106946a34b241d68e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_42074/data-v1-dl-21552912.arff.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f623e777c0a36ae6c82fae10a7c2088cb383298ea244595bf8dc95449c9be4c4 +size 2326 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_561/api-v1-jd-561.json.gz b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_561/api-v1-jd-561.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..92ba4281fe86b5273792d24afaabb04eef03199d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_561/api-v1-jd-561.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a1d38fdd601b67bb9c6d16152f53ddf166a0cfcfef4fa86438e899bfe449226c +size 1798 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_561/api-v1-jdf-561.json.gz b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_561/api-v1-jdf-561.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..b2fce3413fd38f4c4f80ef7d6b198b4ac740a90a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_561/api-v1-jdf-561.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:407424fb79cc30b8e9ff90900b3bf29244ac7f3797f278b5be602843f959b4ee +size 425 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_561/api-v1-jdl-dn-cpu-l-2-dv-1.json.gz b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_561/api-v1-jdl-dn-cpu-l-2-dv-1.json.gz new file mode 100644 
index 0000000000000000000000000000000000000000..52ae92392967d187709107d1c1bc9709c085b519 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_561/api-v1-jdl-dn-cpu-l-2-dv-1.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0703b0ae20b9ff75087dc601640ee58f1c2ad6768858ea21a245151da9ba8e4c +size 301 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_561/api-v1-jdl-dn-cpu-l-2-s-act-.json.gz b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_561/api-v1-jdl-dn-cpu-l-2-s-act-.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..6bde2de0c6047726f26476a514d27a0d03c7d4b5 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_561/api-v1-jdl-dn-cpu-l-2-s-act-.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:70d4596ad879547863109da8675c2b789d07df66b526d7ebcbce9616c4c9b94c +size 347 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_561/api-v1-jdq-561.json.gz b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_561/api-v1-jdq-561.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..911f6823bb1bf0d9de5120e23e902e9a0a39a2bc --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_561/api-v1-jdq-561.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8743b2d93d2c62a82fb47e1fbc002b97e25adcfb5bf1fcb26b58ad0bed15bd48 +size 1074 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_561/data-v1-dl-52739.arff.gz b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_561/data-v1-dl-52739.arff.gz new file mode 100644 index 0000000000000000000000000000000000000000..225208c948bd5270b3911828bead9d2fd3af3fbb --- /dev/null +++ 
b/env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_561/data-v1-dl-52739.arff.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e96142b5e00dfec2617b0c22d7192b340ae2c28ec3ffc3a894c5be746b970a59 +size 3303 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0da99dd69a44ba3c0b6e4b5f4be05c495a564593 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_base.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..714dea7c00e87636a7e131e7f3cd5d1f59e632ed Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_base.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_dict_learning.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_dict_learning.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2ae646eff426a6f8f05d1711fad0c3f8da6ee047 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_dict_learning.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_factor_analysis.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_factor_analysis.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a2a5d7d6c58e3d9f590efa582a92b60b8f774fbf Binary 
files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_factor_analysis.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_fastica.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_fastica.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3874cba760d3398d56bd0106ad8d13a9526b3746 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_fastica.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_incremental_pca.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_incremental_pca.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..13605ee8d9fbec5e88b6d5f95330989541d1c721 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_incremental_pca.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_kernel_pca.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_kernel_pca.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3b3d8a2df686e139a3a69b830c563e52b6eb48e3 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_kernel_pca.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_lda.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_lda.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..af717934f16ab8414503cc30fbbde79c63b2b425 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_lda.cpython-310.pyc 
differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_nmf.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_nmf.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4120378009b57ae380ff3bf39c592cf245ea5807 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_nmf.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_pca.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_pca.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f3945088e4586306f7f7c14cd9d057dbb0f9bf65 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_pca.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_sparse_pca.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_sparse_pca.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d5f5ae5e1e236181cdd67978de63218c565a8c70 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_sparse_pca.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_truncated_svd.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_truncated_svd.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f301241848507be10124176108b6c1fe0c358c85 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_truncated_svd.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/tests/__init__.py 
b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eab9b51f4a903e6e9cfcace73237b2682c513627 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_dict_learning.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_dict_learning.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..760c732360d767bbdfc80fd7b84e01a606cfdf70 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_dict_learning.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_factor_analysis.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_factor_analysis.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9931667da4aceadd6c50e515b52ca0b6c8370256 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_factor_analysis.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_fastica.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_fastica.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..9c69b7009f4d6b5f89bc236c98e5a506a1c24324 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_fastica.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_incremental_pca.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_incremental_pca.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f1bc7010ecb19b07ce65e78c917c8a57431544ef Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_incremental_pca.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_kernel_pca.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_kernel_pca.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..edd197bd8bab78fec1b1031a847fce34a19d5f68 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_kernel_pca.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_nmf.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_nmf.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c0488785f9b91082a861e4c639c9807b018e098f Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_nmf.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_online_lda.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_online_lda.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..1c1fbd04952844dcd8f7409eed6b2bacacea5e28 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_online_lda.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_pca.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_pca.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dc75c89b777aa533a7c80029769d43b160888066 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_pca.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_sparse_pca.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_sparse_pca.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..86a1cc4f562e69b35cfd93310d09f08e85cdd04d Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_sparse_pca.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_truncated_svd.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_truncated_svd.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e661c507e9ac3b661a05838fa8df788cd9383b0b Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_truncated_svd.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/tests/test_dict_learning.py b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/tests/test_dict_learning.py new file mode 100644 index 
0000000000000000000000000000000000000000..b79df4db8cd74a70452ca7212b36b7ddc305caa3 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/tests/test_dict_learning.py @@ -0,0 +1,983 @@ +import itertools +import warnings +from functools import partial + +import numpy as np +import pytest + +import sklearn +from sklearn.base import clone +from sklearn.decomposition import ( + DictionaryLearning, + MiniBatchDictionaryLearning, + SparseCoder, + dict_learning, + dict_learning_online, + sparse_encode, +) +from sklearn.decomposition._dict_learning import _update_dict +from sklearn.exceptions import ConvergenceWarning +from sklearn.utils import check_array +from sklearn.utils._testing import ( + TempMemmap, + assert_allclose, + assert_array_almost_equal, + assert_array_equal, + ignore_warnings, +) +from sklearn.utils.estimator_checks import ( + check_transformer_data_not_an_array, + check_transformer_general, + check_transformers_unfitted, +) +from sklearn.utils.parallel import Parallel + +rng_global = np.random.RandomState(0) +n_samples, n_features = 10, 8 +X = rng_global.randn(n_samples, n_features) + + +def test_sparse_encode_shapes_omp(): + rng = np.random.RandomState(0) + algorithms = ["omp", "lasso_lars", "lasso_cd", "lars", "threshold"] + for n_components, n_samples in itertools.product([1, 5], [1, 9]): + X_ = rng.randn(n_samples, n_features) + dictionary = rng.randn(n_components, n_features) + for algorithm, n_jobs in itertools.product(algorithms, [1, 2]): + code = sparse_encode(X_, dictionary, algorithm=algorithm, n_jobs=n_jobs) + assert code.shape == (n_samples, n_components) + + +def test_dict_learning_shapes(): + n_components = 5 + dico = DictionaryLearning(n_components, random_state=0).fit(X) + assert dico.components_.shape == (n_components, n_features) + + n_components = 1 + dico = DictionaryLearning(n_components, random_state=0).fit(X) + assert dico.components_.shape == (n_components, n_features) + assert dico.transform(X).shape == 
(X.shape[0], n_components) + + +def test_dict_learning_overcomplete(): + n_components = 12 + dico = DictionaryLearning(n_components, random_state=0).fit(X) + assert dico.components_.shape == (n_components, n_features) + + +def test_max_iter(): + def ricker_function(resolution, center, width): + """Discrete sub-sampled Ricker (Mexican hat) wavelet""" + x = np.linspace(0, resolution - 1, resolution) + x = ( + (2 / (np.sqrt(3 * width) * np.pi**0.25)) + * (1 - (x - center) ** 2 / width**2) + * np.exp(-((x - center) ** 2) / (2 * width**2)) + ) + return x + + def ricker_matrix(width, resolution, n_components): + """Dictionary of Ricker (Mexican hat) wavelets""" + centers = np.linspace(0, resolution - 1, n_components) + D = np.empty((n_components, resolution)) + for i, center in enumerate(centers): + D[i] = ricker_function(resolution, center, width) + D /= np.sqrt(np.sum(D**2, axis=1))[:, np.newaxis] + return D + + transform_algorithm = "lasso_cd" + resolution = 1024 + subsampling = 3 # subsampling factor + n_components = resolution // subsampling + + # Compute a wavelet dictionary + D_multi = np.r_[ + tuple( + ricker_matrix( + width=w, resolution=resolution, n_components=n_components // 5 + ) + for w in (10, 50, 100, 500, 1000) + ) + ] + + X = np.linspace(0, resolution - 1, resolution) + first_quarter = X < resolution / 4 + X[first_quarter] = 3.0 + X[np.logical_not(first_quarter)] = -1.0 + X = X.reshape(1, -1) + + # check that the underlying model fails to converge + with pytest.warns(ConvergenceWarning): + model = SparseCoder( + D_multi, transform_algorithm=transform_algorithm, transform_max_iter=1 + ) + model.fit_transform(X) + + # check that the underlying model converges w/o warnings + with warnings.catch_warnings(): + warnings.simplefilter("error", ConvergenceWarning) + model = SparseCoder( + D_multi, transform_algorithm=transform_algorithm, transform_max_iter=2000 + ) + model.fit_transform(X) + + +def test_dict_learning_lars_positive_parameter(): + n_components = 5 
+ alpha = 1 + err_msg = "Positive constraint not supported for 'lars' coding method." + with pytest.raises(ValueError, match=err_msg): + dict_learning(X, n_components, alpha=alpha, positive_code=True) + + +@pytest.mark.parametrize( + "transform_algorithm", + [ + "lasso_lars", + "lasso_cd", + "threshold", + ], +) +@pytest.mark.parametrize("positive_code", [False, True]) +@pytest.mark.parametrize("positive_dict", [False, True]) +def test_dict_learning_positivity(transform_algorithm, positive_code, positive_dict): + n_components = 5 + dico = DictionaryLearning( + n_components, + transform_algorithm=transform_algorithm, + random_state=0, + positive_code=positive_code, + positive_dict=positive_dict, + fit_algorithm="cd", + ).fit(X) + + code = dico.transform(X) + if positive_dict: + assert (dico.components_ >= 0).all() + else: + assert (dico.components_ < 0).any() + if positive_code: + assert (code >= 0).all() + else: + assert (code < 0).any() + + +@pytest.mark.parametrize("positive_dict", [False, True]) +def test_dict_learning_lars_dict_positivity(positive_dict): + n_components = 5 + dico = DictionaryLearning( + n_components, + transform_algorithm="lars", + random_state=0, + positive_dict=positive_dict, + fit_algorithm="cd", + ).fit(X) + + if positive_dict: + assert (dico.components_ >= 0).all() + else: + assert (dico.components_ < 0).any() + + +def test_dict_learning_lars_code_positivity(): + n_components = 5 + dico = DictionaryLearning( + n_components, + transform_algorithm="lars", + random_state=0, + positive_code=True, + fit_algorithm="cd", + ).fit(X) + + err_msg = "Positive constraint not supported for '{}' coding method." 
+ err_msg = err_msg.format("lars") + with pytest.raises(ValueError, match=err_msg): + dico.transform(X) + + +def test_dict_learning_reconstruction(): + n_components = 12 + dico = DictionaryLearning( + n_components, transform_algorithm="omp", transform_alpha=0.001, random_state=0 + ) + code = dico.fit(X).transform(X) + assert_array_almost_equal(np.dot(code, dico.components_), X) + + dico.set_params(transform_algorithm="lasso_lars") + code = dico.transform(X) + assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2) + + # used to test lars here too, but there's no guarantee the number of + # nonzero atoms is right. + + +def test_dict_learning_reconstruction_parallel(): + # regression test that parallel reconstruction works with n_jobs>1 + n_components = 12 + dico = DictionaryLearning( + n_components, + transform_algorithm="omp", + transform_alpha=0.001, + random_state=0, + n_jobs=4, + ) + code = dico.fit(X).transform(X) + assert_array_almost_equal(np.dot(code, dico.components_), X) + + dico.set_params(transform_algorithm="lasso_lars") + code = dico.transform(X) + assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2) + + +def test_dict_learning_lassocd_readonly_data(): + n_components = 12 + with TempMemmap(X) as X_read_only: + dico = DictionaryLearning( + n_components, + transform_algorithm="lasso_cd", + transform_alpha=0.001, + random_state=0, + n_jobs=4, + ) + with ignore_warnings(category=ConvergenceWarning): + code = dico.fit(X_read_only).transform(X_read_only) + assert_array_almost_equal( + np.dot(code, dico.components_), X_read_only, decimal=2 + ) + + +def test_dict_learning_nonzero_coefs(): + n_components = 4 + dico = DictionaryLearning( + n_components, + transform_algorithm="lars", + transform_n_nonzero_coefs=3, + random_state=0, + ) + code = dico.fit(X).transform(X[np.newaxis, 1]) + assert len(np.flatnonzero(code)) == 3 + + dico.set_params(transform_algorithm="omp") + code = dico.transform(X[np.newaxis, 1]) + assert 
len(np.flatnonzero(code)) == 3 + + +def test_dict_learning_split(): + n_components = 5 + dico = DictionaryLearning( + n_components, transform_algorithm="threshold", random_state=0 + ) + code = dico.fit(X).transform(X) + dico.split_sign = True + split_code = dico.transform(X) + + assert_array_almost_equal( + split_code[:, :n_components] - split_code[:, n_components:], code + ) + + +def test_dict_learning_online_shapes(): + rng = np.random.RandomState(0) + n_components = 8 + + code, dictionary = dict_learning_online( + X, + n_components=n_components, + batch_size=4, + max_iter=10, + method="cd", + random_state=rng, + return_code=True, + ) + assert code.shape == (n_samples, n_components) + assert dictionary.shape == (n_components, n_features) + assert np.dot(code, dictionary).shape == X.shape + + dictionary = dict_learning_online( + X, + n_components=n_components, + batch_size=4, + max_iter=10, + method="cd", + random_state=rng, + return_code=False, + ) + assert dictionary.shape == (n_components, n_features) + + +def test_dict_learning_online_lars_positive_parameter(): + err_msg = "Positive constraint not supported for 'lars' coding method." 
+ with pytest.raises(ValueError, match=err_msg): + dict_learning_online(X, batch_size=4, max_iter=10, positive_code=True) + + +@pytest.mark.parametrize( + "transform_algorithm", + [ + "lasso_lars", + "lasso_cd", + "threshold", + ], +) +@pytest.mark.parametrize("positive_code", [False, True]) +@pytest.mark.parametrize("positive_dict", [False, True]) +def test_minibatch_dictionary_learning_positivity( + transform_algorithm, positive_code, positive_dict +): + n_components = 8 + dico = MiniBatchDictionaryLearning( + n_components, + batch_size=4, + max_iter=10, + transform_algorithm=transform_algorithm, + random_state=0, + positive_code=positive_code, + positive_dict=positive_dict, + fit_algorithm="cd", + ).fit(X) + + code = dico.transform(X) + if positive_dict: + assert (dico.components_ >= 0).all() + else: + assert (dico.components_ < 0).any() + if positive_code: + assert (code >= 0).all() + else: + assert (code < 0).any() + + +@pytest.mark.parametrize("positive_dict", [False, True]) +def test_minibatch_dictionary_learning_lars(positive_dict): + n_components = 8 + + dico = MiniBatchDictionaryLearning( + n_components, + batch_size=4, + max_iter=10, + transform_algorithm="lars", + random_state=0, + positive_dict=positive_dict, + fit_algorithm="cd", + ).fit(X) + + if positive_dict: + assert (dico.components_ >= 0).all() + else: + assert (dico.components_ < 0).any() + + +@pytest.mark.parametrize("positive_code", [False, True]) +@pytest.mark.parametrize("positive_dict", [False, True]) +def test_dict_learning_online_positivity(positive_code, positive_dict): + rng = np.random.RandomState(0) + n_components = 8 + + code, dictionary = dict_learning_online( + X, + n_components=n_components, + batch_size=4, + method="cd", + alpha=1, + random_state=rng, + positive_dict=positive_dict, + positive_code=positive_code, + ) + if positive_dict: + assert (dictionary >= 0).all() + else: + assert (dictionary < 0).any() + if positive_code: + assert (code >= 0).all() + else: + assert (code < 
0).any() + + +def test_dict_learning_online_verbosity(): + # test verbosity for better coverage + n_components = 5 + import sys + from io import StringIO + + old_stdout = sys.stdout + try: + sys.stdout = StringIO() + + # convergence monitoring verbosity + dico = MiniBatchDictionaryLearning( + n_components, batch_size=4, max_iter=5, verbose=1, tol=0.1, random_state=0 + ) + dico.fit(X) + dico = MiniBatchDictionaryLearning( + n_components, + batch_size=4, + max_iter=5, + verbose=1, + max_no_improvement=2, + random_state=0, + ) + dico.fit(X) + # higher verbosity level + dico = MiniBatchDictionaryLearning( + n_components, batch_size=4, max_iter=5, verbose=2, random_state=0 + ) + dico.fit(X) + + # function API verbosity + dict_learning_online( + X, + n_components=n_components, + batch_size=4, + alpha=1, + verbose=1, + random_state=0, + ) + dict_learning_online( + X, + n_components=n_components, + batch_size=4, + alpha=1, + verbose=2, + random_state=0, + ) + finally: + sys.stdout = old_stdout + + assert dico.components_.shape == (n_components, n_features) + + +def test_dict_learning_online_estimator_shapes(): + n_components = 5 + dico = MiniBatchDictionaryLearning( + n_components, batch_size=4, max_iter=5, random_state=0 + ) + dico.fit(X) + assert dico.components_.shape == (n_components, n_features) + + +def test_dict_learning_online_overcomplete(): + n_components = 12 + dico = MiniBatchDictionaryLearning( + n_components, batch_size=4, max_iter=5, random_state=0 + ).fit(X) + assert dico.components_.shape == (n_components, n_features) + + +def test_dict_learning_online_initialization(): + n_components = 12 + rng = np.random.RandomState(0) + V = rng.randn(n_components, n_features) + dico = MiniBatchDictionaryLearning( + n_components, batch_size=4, max_iter=0, dict_init=V, random_state=0 + ).fit(X) + assert_array_equal(dico.components_, V) + + +def test_dict_learning_online_readonly_initialization(): + n_components = 12 + rng = np.random.RandomState(0) + V = 
rng.randn(n_components, n_features) + V.setflags(write=False) + MiniBatchDictionaryLearning( + n_components, + batch_size=4, + max_iter=1, + dict_init=V, + random_state=0, + shuffle=False, + ).fit(X) + + +def test_dict_learning_online_partial_fit(): + n_components = 12 + rng = np.random.RandomState(0) + V = rng.randn(n_components, n_features) # random init + V /= np.sum(V**2, axis=1)[:, np.newaxis] + dict1 = MiniBatchDictionaryLearning( + n_components, + max_iter=10, + batch_size=1, + alpha=1, + shuffle=False, + dict_init=V, + max_no_improvement=None, + tol=0.0, + random_state=0, + ).fit(X) + dict2 = MiniBatchDictionaryLearning( + n_components, alpha=1, dict_init=V, random_state=0 + ) + for i in range(10): + for sample in X: + dict2.partial_fit(sample[np.newaxis, :]) + + assert not np.all(sparse_encode(X, dict1.components_, alpha=1) == 0) + assert_array_almost_equal(dict1.components_, dict2.components_, decimal=2) + + # partial_fit should ignore max_iter (#17433) + assert dict1.n_steps_ == dict2.n_steps_ == 100 + + +def test_sparse_encode_shapes(): + n_components = 12 + rng = np.random.RandomState(0) + V = rng.randn(n_components, n_features) # random init + V /= np.sum(V**2, axis=1)[:, np.newaxis] + for algo in ("lasso_lars", "lasso_cd", "lars", "omp", "threshold"): + code = sparse_encode(X, V, algorithm=algo) + assert code.shape == (n_samples, n_components) + + +@pytest.mark.parametrize("algo", ["lasso_lars", "lasso_cd", "threshold"]) +@pytest.mark.parametrize("positive", [False, True]) +def test_sparse_encode_positivity(algo, positive): + n_components = 12 + rng = np.random.RandomState(0) + V = rng.randn(n_components, n_features) # random init + V /= np.sum(V**2, axis=1)[:, np.newaxis] + code = sparse_encode(X, V, algorithm=algo, positive=positive) + if positive: + assert (code >= 0).all() + else: + assert (code < 0).any() + + +@pytest.mark.parametrize("algo", ["lars", "omp"]) +def test_sparse_encode_unavailable_positivity(algo): + n_components = 12 + rng = 
np.random.RandomState(0) + V = rng.randn(n_components, n_features) # random init + V /= np.sum(V**2, axis=1)[:, np.newaxis] + err_msg = "Positive constraint not supported for '{}' coding method." + err_msg = err_msg.format(algo) + with pytest.raises(ValueError, match=err_msg): + sparse_encode(X, V, algorithm=algo, positive=True) + + +def test_sparse_encode_input(): + n_components = 100 + rng = np.random.RandomState(0) + V = rng.randn(n_components, n_features) # random init + V /= np.sum(V**2, axis=1)[:, np.newaxis] + Xf = check_array(X, order="F") + for algo in ("lasso_lars", "lasso_cd", "lars", "omp", "threshold"): + a = sparse_encode(X, V, algorithm=algo) + b = sparse_encode(Xf, V, algorithm=algo) + assert_array_almost_equal(a, b) + + +def test_sparse_encode_error(): + n_components = 12 + rng = np.random.RandomState(0) + V = rng.randn(n_components, n_features) # random init + V /= np.sum(V**2, axis=1)[:, np.newaxis] + code = sparse_encode(X, V, alpha=0.001) + assert not np.all(code == 0) + assert np.sqrt(np.sum((np.dot(code, V) - X) ** 2)) < 0.1 + + +def test_sparse_encode_error_default_sparsity(): + rng = np.random.RandomState(0) + X = rng.randn(100, 64) + D = rng.randn(2, 64) + code = ignore_warnings(sparse_encode)(X, D, algorithm="omp", n_nonzero_coefs=None) + assert code.shape == (100, 2) + + +def test_sparse_coder_estimator(): + n_components = 12 + rng = np.random.RandomState(0) + V = rng.randn(n_components, n_features) # random init + V /= np.sum(V**2, axis=1)[:, np.newaxis] + coder = SparseCoder( + dictionary=V, transform_algorithm="lasso_lars", transform_alpha=0.001 + ).transform(X) + assert not np.all(coder == 0) + assert np.sqrt(np.sum((np.dot(coder, V) - X) ** 2)) < 0.1 + + +def test_sparse_coder_estimator_clone(): + n_components = 12 + rng = np.random.RandomState(0) + V = rng.randn(n_components, n_features) # random init + V /= np.sum(V**2, axis=1)[:, np.newaxis] + coder = SparseCoder( + dictionary=V, transform_algorithm="lasso_lars", 
transform_alpha=0.001 + ) + cloned = clone(coder) + assert id(cloned) != id(coder) + np.testing.assert_allclose(cloned.dictionary, coder.dictionary) + assert id(cloned.dictionary) != id(coder.dictionary) + assert cloned.n_components_ == coder.n_components_ + assert cloned.n_features_in_ == coder.n_features_in_ + data = np.random.rand(n_samples, n_features).astype(np.float32) + np.testing.assert_allclose(cloned.transform(data), coder.transform(data)) + + +def test_sparse_coder_parallel_mmap(): + # Non-regression test for: + # https://github.com/scikit-learn/scikit-learn/issues/5956 + # Test that SparseCoder does not error by passing reading only + # arrays to child processes + + rng = np.random.RandomState(777) + n_components, n_features = 40, 64 + init_dict = rng.rand(n_components, n_features) + # Ensure that `data` is >2M. Joblib memory maps arrays + # if they are larger than 1MB. The 4 accounts for float32 + # data type + n_samples = int(2e6) // (4 * n_features) + data = np.random.rand(n_samples, n_features).astype(np.float32) + + sc = SparseCoder(init_dict, transform_algorithm="omp", n_jobs=2) + sc.fit_transform(data) + + +def test_sparse_coder_common_transformer(): + rng = np.random.RandomState(777) + n_components, n_features = 40, 3 + init_dict = rng.rand(n_components, n_features) + + sc = SparseCoder(init_dict) + + check_transformer_data_not_an_array(sc.__class__.__name__, sc) + check_transformer_general(sc.__class__.__name__, sc) + check_transformer_general_memmap = partial( + check_transformer_general, readonly_memmap=True + ) + check_transformer_general_memmap(sc.__class__.__name__, sc) + check_transformers_unfitted(sc.__class__.__name__, sc) + + +def test_sparse_coder_n_features_in(): + d = np.array([[1, 2, 3], [1, 2, 3]]) + sc = SparseCoder(d) + assert sc.n_features_in_ == d.shape[1] + + +def test_update_dict(): + # Check the dict update in batch mode vs online mode + # Non-regression test for #4866 + rng = np.random.RandomState(0) + + code = 
np.array([[0.5, -0.5], [0.1, 0.9]]) + dictionary = np.array([[1.0, 0.0], [0.6, 0.8]]) + + X = np.dot(code, dictionary) + rng.randn(2, 2) + + # full batch update + newd_batch = dictionary.copy() + _update_dict(newd_batch, X, code) + + # online update + A = np.dot(code.T, code) + B = np.dot(X.T, code) + newd_online = dictionary.copy() + _update_dict(newd_online, X, code, A, B) + + assert_allclose(newd_batch, newd_online) + + +@pytest.mark.parametrize( + "algorithm", ("lasso_lars", "lasso_cd", "lars", "threshold", "omp") +) +@pytest.mark.parametrize("data_type", (np.float32, np.float64)) +# Note: do not check integer input because `lasso_lars` and `lars` fail with +# `ValueError` in `_lars_path_solver` +def test_sparse_encode_dtype_match(data_type, algorithm): + n_components = 6 + rng = np.random.RandomState(0) + dictionary = rng.randn(n_components, n_features) + code = sparse_encode( + X.astype(data_type), dictionary.astype(data_type), algorithm=algorithm + ) + assert code.dtype == data_type + + +@pytest.mark.parametrize( + "algorithm", ("lasso_lars", "lasso_cd", "lars", "threshold", "omp") +) +def test_sparse_encode_numerical_consistency(algorithm): + # verify numerical consistency among np.float32 and np.float64 + rtol = 1e-4 + n_components = 6 + rng = np.random.RandomState(0) + dictionary = rng.randn(n_components, n_features) + code_32 = sparse_encode( + X.astype(np.float32), dictionary.astype(np.float32), algorithm=algorithm + ) + code_64 = sparse_encode( + X.astype(np.float64), dictionary.astype(np.float64), algorithm=algorithm + ) + assert_allclose(code_32, code_64, rtol=rtol) + + +@pytest.mark.parametrize( + "transform_algorithm", ("lasso_lars", "lasso_cd", "lars", "threshold", "omp") +) +@pytest.mark.parametrize("data_type", (np.float32, np.float64)) +# Note: do not check integer input because `lasso_lars` and `lars` fail with +# `ValueError` in `_lars_path_solver` +def test_sparse_coder_dtype_match(data_type, transform_algorithm): + # Verify preserving dtype 
for transform in sparse coder + n_components = 6 + rng = np.random.RandomState(0) + dictionary = rng.randn(n_components, n_features) + coder = SparseCoder( + dictionary.astype(data_type), transform_algorithm=transform_algorithm + ) + code = coder.transform(X.astype(data_type)) + assert code.dtype == data_type + + +@pytest.mark.parametrize("fit_algorithm", ("lars", "cd")) +@pytest.mark.parametrize( + "transform_algorithm", ("lasso_lars", "lasso_cd", "lars", "threshold", "omp") +) +@pytest.mark.parametrize( + "data_type, expected_type", + ( + (np.float32, np.float32), + (np.float64, np.float64), + (np.int32, np.float64), + (np.int64, np.float64), + ), +) +def test_dictionary_learning_dtype_match( + data_type, + expected_type, + fit_algorithm, + transform_algorithm, +): + # Verify preserving dtype for fit and transform in dictionary learning class + dict_learner = DictionaryLearning( + n_components=8, + fit_algorithm=fit_algorithm, + transform_algorithm=transform_algorithm, + random_state=0, + ) + dict_learner.fit(X.astype(data_type)) + assert dict_learner.components_.dtype == expected_type + assert dict_learner.transform(X.astype(data_type)).dtype == expected_type + + +@pytest.mark.parametrize("fit_algorithm", ("lars", "cd")) +@pytest.mark.parametrize( + "transform_algorithm", ("lasso_lars", "lasso_cd", "lars", "threshold", "omp") +) +@pytest.mark.parametrize( + "data_type, expected_type", + ( + (np.float32, np.float32), + (np.float64, np.float64), + (np.int32, np.float64), + (np.int64, np.float64), + ), +) +def test_minibatch_dictionary_learning_dtype_match( + data_type, + expected_type, + fit_algorithm, + transform_algorithm, +): + # Verify preserving dtype for fit and transform in minibatch dictionary learning + dict_learner = MiniBatchDictionaryLearning( + n_components=8, + batch_size=10, + fit_algorithm=fit_algorithm, + transform_algorithm=transform_algorithm, + max_iter=100, + tol=1e-1, + random_state=0, + ) + dict_learner.fit(X.astype(data_type)) + + assert 
dict_learner.components_.dtype == expected_type + assert dict_learner.transform(X.astype(data_type)).dtype == expected_type + assert dict_learner._A.dtype == expected_type + assert dict_learner._B.dtype == expected_type + + +@pytest.mark.parametrize("method", ("lars", "cd")) +@pytest.mark.parametrize( + "data_type, expected_type", + ( + (np.float32, np.float32), + (np.float64, np.float64), + (np.int32, np.float64), + (np.int64, np.float64), + ), +) +def test_dict_learning_dtype_match(data_type, expected_type, method): + # Verify output matrix dtype + rng = np.random.RandomState(0) + n_components = 8 + code, dictionary, _ = dict_learning( + X.astype(data_type), + n_components=n_components, + alpha=1, + random_state=rng, + method=method, + ) + assert code.dtype == expected_type + assert dictionary.dtype == expected_type + + +@pytest.mark.parametrize("method", ("lars", "cd")) +def test_dict_learning_numerical_consistency(method): + # verify numerically consistent among np.float32 and np.float64 + rtol = 1e-6 + n_components = 4 + alpha = 2 + + U_64, V_64, _ = dict_learning( + X.astype(np.float64), + n_components=n_components, + alpha=alpha, + random_state=0, + method=method, + ) + U_32, V_32, _ = dict_learning( + X.astype(np.float32), + n_components=n_components, + alpha=alpha, + random_state=0, + method=method, + ) + + # Optimal solution (U*, V*) is not unique. + # If (U*, V*) is optimal solution, (-U*,-V*) is also optimal, + # and (column permutated U*, row permutated V*) are also optional + # as long as holding UV. + # So here UV, ||U||_1,1 and sum(||V_k||_2^2) are verified + # instead of comparing directly U and V. 
+ assert_allclose(np.matmul(U_64, V_64), np.matmul(U_32, V_32), rtol=rtol) + assert_allclose(np.sum(np.abs(U_64)), np.sum(np.abs(U_32)), rtol=rtol) + assert_allclose(np.sum(V_64**2), np.sum(V_32**2), rtol=rtol) + # verify an obtained solution is not degenerate + assert np.mean(U_64 != 0.0) > 0.05 + assert np.count_nonzero(U_64 != 0.0) == np.count_nonzero(U_32 != 0.0) + + +@pytest.mark.parametrize("method", ("lars", "cd")) +@pytest.mark.parametrize( + "data_type, expected_type", + ( + (np.float32, np.float32), + (np.float64, np.float64), + (np.int32, np.float64), + (np.int64, np.float64), + ), +) +def test_dict_learning_online_dtype_match(data_type, expected_type, method): + # Verify output matrix dtype + rng = np.random.RandomState(0) + n_components = 8 + code, dictionary = dict_learning_online( + X.astype(data_type), + n_components=n_components, + alpha=1, + batch_size=10, + random_state=rng, + method=method, + ) + assert code.dtype == expected_type + assert dictionary.dtype == expected_type + + +@pytest.mark.parametrize("method", ("lars", "cd")) +def test_dict_learning_online_numerical_consistency(method): + # verify numerically consistent among np.float32 and np.float64 + rtol = 1e-4 + n_components = 4 + alpha = 1 + + U_64, V_64 = dict_learning_online( + X.astype(np.float64), + n_components=n_components, + max_iter=1_000, + alpha=alpha, + batch_size=10, + random_state=0, + method=method, + tol=0.0, + max_no_improvement=None, + ) + U_32, V_32 = dict_learning_online( + X.astype(np.float32), + n_components=n_components, + max_iter=1_000, + alpha=alpha, + batch_size=10, + random_state=0, + method=method, + tol=0.0, + max_no_improvement=None, + ) + + # Optimal solution (U*, V*) is not unique. + # If (U*, V*) is optimal solution, (-U*,-V*) is also optimal, + # and (column permutated U*, row permutated V*) are also optional + # as long as holding UV. + # So here UV, ||U||_1,1 and sum(||V_k||_2) are verified + # instead of comparing directly U and V. 
+ assert_allclose(np.matmul(U_64, V_64), np.matmul(U_32, V_32), rtol=rtol) + assert_allclose(np.sum(np.abs(U_64)), np.sum(np.abs(U_32)), rtol=rtol) + assert_allclose(np.sum(V_64**2), np.sum(V_32**2), rtol=rtol) + # verify an obtained solution is not degenerate + assert np.mean(U_64 != 0.0) > 0.05 + assert np.count_nonzero(U_64 != 0.0) == np.count_nonzero(U_32 != 0.0) + + +@pytest.mark.parametrize( + "estimator", + [ + SparseCoder(X.T), + DictionaryLearning(), + MiniBatchDictionaryLearning(batch_size=4, max_iter=10), + ], + ids=lambda x: x.__class__.__name__, +) +def test_get_feature_names_out(estimator): + """Check feature names for dict learning estimators.""" + estimator.fit(X) + n_components = X.shape[1] + + feature_names_out = estimator.get_feature_names_out() + estimator_name = estimator.__class__.__name__.lower() + assert_array_equal( + feature_names_out, + [f"{estimator_name}{i}" for i in range(n_components)], + ) + + +def test_cd_work_on_joblib_memmapped_data(monkeypatch): + monkeypatch.setattr( + sklearn.decomposition._dict_learning, + "Parallel", + partial(Parallel, max_nbytes=100), + ) + + rng = np.random.RandomState(0) + X_train = rng.randn(10, 10) + + dict_learner = DictionaryLearning( + n_components=5, + random_state=0, + n_jobs=2, + fit_algorithm="cd", + max_iter=50, + verbose=True, + ) + + # This must run and complete without error. 
+ dict_learner.fit(X_train) + + +# TODO(1.6): remove in 1.6 +def test_xxx(): + warn_msg = "`max_iter=None` is deprecated in version 1.4 and will be removed" + with pytest.warns(FutureWarning, match=warn_msg): + MiniBatchDictionaryLearning(max_iter=None, random_state=0).fit(X) + with pytest.warns(FutureWarning, match=warn_msg): + dict_learning_online(X, max_iter=None, random_state=0) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/tests/test_factor_analysis.py b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/tests/test_factor_analysis.py new file mode 100644 index 0000000000000000000000000000000000000000..2ff14f8d71722463e4cd4f8c815c957ffd7ba9f0 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/tests/test_factor_analysis.py @@ -0,0 +1,116 @@ +# Author: Christian Osendorfer +# Alexandre Gramfort +# License: BSD3 + +from itertools import combinations + +import numpy as np +import pytest + +from sklearn.decomposition import FactorAnalysis +from sklearn.decomposition._factor_analysis import _ortho_rotation +from sklearn.exceptions import ConvergenceWarning +from sklearn.utils._testing import ( + assert_almost_equal, + assert_array_almost_equal, + ignore_warnings, +) + + +# Ignore warnings from switching to more power iterations in randomized_svd +@ignore_warnings +def test_factor_analysis(): + # Test FactorAnalysis ability to recover the data covariance structure + rng = np.random.RandomState(0) + n_samples, n_features, n_components = 20, 5, 3 + + # Some random settings for the generative model + W = rng.randn(n_components, n_features) + # latent variable of dim 3, 20 of it + h = rng.randn(n_samples, n_components) + # using gamma to model different noise variance + # per component + noise = rng.gamma(1, size=n_features) * rng.randn(n_samples, n_features) + + # generate observations + # wlog, mean is 0 + X = np.dot(h, W) + noise + + fas = [] + for method in ["randomized", "lapack"]: + fa = 
FactorAnalysis(n_components=n_components, svd_method=method) + fa.fit(X) + fas.append(fa) + + X_t = fa.transform(X) + assert X_t.shape == (n_samples, n_components) + + assert_almost_equal(fa.loglike_[-1], fa.score_samples(X).sum()) + assert_almost_equal(fa.score_samples(X).mean(), fa.score(X)) + + diff = np.all(np.diff(fa.loglike_)) + assert diff > 0.0, "Log likelihood dif not increase" + + # Sample Covariance + scov = np.cov(X, rowvar=0.0, bias=1.0) + + # Model Covariance + mcov = fa.get_covariance() + diff = np.sum(np.abs(scov - mcov)) / W.size + assert diff < 0.1, "Mean absolute difference is %f" % diff + fa = FactorAnalysis( + n_components=n_components, noise_variance_init=np.ones(n_features) + ) + with pytest.raises(ValueError): + fa.fit(X[:, :2]) + + def f(x, y): + return np.abs(getattr(x, y)) # sign will not be equal + + fa1, fa2 = fas + for attr in ["loglike_", "components_", "noise_variance_"]: + assert_almost_equal(f(fa1, attr), f(fa2, attr)) + + fa1.max_iter = 1 + fa1.verbose = True + with pytest.warns(ConvergenceWarning): + fa1.fit(X) + + # Test get_covariance and get_precision with n_components == n_features + # with n_components < n_features and with n_components == 0 + for n_components in [0, 2, X.shape[1]]: + fa.n_components = n_components + fa.fit(X) + cov = fa.get_covariance() + precision = fa.get_precision() + assert_array_almost_equal(np.dot(cov, precision), np.eye(X.shape[1]), 12) + + # test rotation + n_components = 2 + + results, projections = {}, {} + for method in (None, "varimax", "quartimax"): + fa_var = FactorAnalysis(n_components=n_components, rotation=method) + results[method] = fa_var.fit_transform(X) + projections[method] = fa_var.get_covariance() + for rot1, rot2 in combinations([None, "varimax", "quartimax"], 2): + assert not np.allclose(results[rot1], results[rot2]) + assert np.allclose(projections[rot1], projections[rot2], atol=3) + + # test against R's psych::principal with rotate="varimax" + # (i.e., the values below stem from 
rotating the components in R) + # R's factor analysis returns quite different values; therefore, we only + # test the rotation itself + factors = np.array( + [ + [0.89421016, -0.35854928, -0.27770122, 0.03773647], + [-0.45081822, -0.89132754, 0.0932195, -0.01787973], + [0.99500666, -0.02031465, 0.05426497, -0.11539407], + [0.96822861, -0.06299656, 0.24411001, 0.07540887], + ] + ) + r_solution = np.array( + [[0.962, 0.052], [-0.141, 0.989], [0.949, -0.300], [0.937, -0.251]] + ) + rotated = _ortho_rotation(factors[:, :n_components], method="varimax").T + assert_array_almost_equal(np.abs(rotated), np.abs(r_solution), decimal=3) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/tests/test_incremental_pca.py b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/tests/test_incremental_pca.py new file mode 100644 index 0000000000000000000000000000000000000000..5d7c8aa03f174ca6b372cd6c42de15cb4a43d15a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/tests/test_incremental_pca.py @@ -0,0 +1,452 @@ +"""Tests for Incremental PCA.""" +import warnings + +import numpy as np +import pytest +from numpy.testing import assert_array_equal + +from sklearn import datasets +from sklearn.decomposition import PCA, IncrementalPCA +from sklearn.utils._testing import ( + assert_allclose_dense_sparse, + assert_almost_equal, + assert_array_almost_equal, +) +from sklearn.utils.fixes import CSC_CONTAINERS, CSR_CONTAINERS, LIL_CONTAINERS + +iris = datasets.load_iris() + + +def test_incremental_pca(): + # Incremental PCA on dense arrays. 
+ X = iris.data + batch_size = X.shape[0] // 3 + ipca = IncrementalPCA(n_components=2, batch_size=batch_size) + pca = PCA(n_components=2) + pca.fit_transform(X) + + X_transformed = ipca.fit_transform(X) + + assert X_transformed.shape == (X.shape[0], 2) + np.testing.assert_allclose( + ipca.explained_variance_ratio_.sum(), + pca.explained_variance_ratio_.sum(), + rtol=1e-3, + ) + + for n_components in [1, 2, X.shape[1]]: + ipca = IncrementalPCA(n_components, batch_size=batch_size) + ipca.fit(X) + cov = ipca.get_covariance() + precision = ipca.get_precision() + np.testing.assert_allclose( + np.dot(cov, precision), np.eye(X.shape[1]), atol=1e-13 + ) + + +@pytest.mark.parametrize( + "sparse_container", CSC_CONTAINERS + CSR_CONTAINERS + LIL_CONTAINERS +) +def test_incremental_pca_sparse(sparse_container): + # Incremental PCA on sparse arrays. + X = iris.data + pca = PCA(n_components=2) + pca.fit_transform(X) + X_sparse = sparse_container(X) + batch_size = X_sparse.shape[0] // 3 + ipca = IncrementalPCA(n_components=2, batch_size=batch_size) + + X_transformed = ipca.fit_transform(X_sparse) + + assert X_transformed.shape == (X_sparse.shape[0], 2) + np.testing.assert_allclose( + ipca.explained_variance_ratio_.sum(), + pca.explained_variance_ratio_.sum(), + rtol=1e-3, + ) + + for n_components in [1, 2, X.shape[1]]: + ipca = IncrementalPCA(n_components, batch_size=batch_size) + ipca.fit(X_sparse) + cov = ipca.get_covariance() + precision = ipca.get_precision() + np.testing.assert_allclose( + np.dot(cov, precision), np.eye(X_sparse.shape[1]), atol=1e-13 + ) + + with pytest.raises( + TypeError, + match=( + "IncrementalPCA.partial_fit does not support " + "sparse input. Either convert data to dense " + "or use IncrementalPCA.fit to do so in batches." + ), + ): + ipca.partial_fit(X_sparse) + + +def test_incremental_pca_check_projection(): + # Test that the projection of data is correct. 
+ rng = np.random.RandomState(1999) + n, p = 100, 3 + X = rng.randn(n, p) * 0.1 + X[:10] += np.array([3, 4, 5]) + Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5]) + + # Get the reconstruction of the generated data X + # Note that Xt has the same "components" as X, just separated + # This is what we want to ensure is recreated correctly + Yt = IncrementalPCA(n_components=2).fit(X).transform(Xt) + + # Normalize + Yt /= np.sqrt((Yt**2).sum()) + + # Make sure that the first element of Yt is ~1, this means + # the reconstruction worked as expected + assert_almost_equal(np.abs(Yt[0][0]), 1.0, 1) + + +def test_incremental_pca_inverse(): + # Test that the projection of data can be inverted. + rng = np.random.RandomState(1999) + n, p = 50, 3 + X = rng.randn(n, p) # spherical data + X[:, 1] *= 0.00001 # make middle component relatively small + X += [5, 4, 3] # make a large mean + + # same check that we can find the original data from the transformed + # signal (since the data is almost of rank n_components) + ipca = IncrementalPCA(n_components=2, batch_size=10).fit(X) + Y = ipca.transform(X) + Y_inverse = ipca.inverse_transform(Y) + assert_almost_equal(X, Y_inverse, decimal=3) + + +def test_incremental_pca_validation(): + # Test that n_components is <= n_features. + X = np.array([[0, 1, 0], [1, 0, 0]]) + n_samples, n_features = X.shape + n_components = 4 + with pytest.raises( + ValueError, + match=( + "n_components={} invalid" + " for n_features={}, need more rows than" + " columns for IncrementalPCA" + " processing".format(n_components, n_features) + ), + ): + IncrementalPCA(n_components, batch_size=10).fit(X) + + # Tests that n_components is also <= n_samples. 
+ n_components = 3 + with pytest.raises( + ValueError, + match=( + "n_components={} must be" + " less or equal to the batch number of" + " samples {}".format(n_components, n_samples) + ), + ): + IncrementalPCA(n_components=n_components).partial_fit(X) + + +def test_n_samples_equal_n_components(): + # Ensures no warning is raised when n_samples==n_components + # Non-regression test for gh-19050 + ipca = IncrementalPCA(n_components=5) + with warnings.catch_warnings(): + warnings.simplefilter("error", RuntimeWarning) + ipca.partial_fit(np.random.randn(5, 7)) + with warnings.catch_warnings(): + warnings.simplefilter("error", RuntimeWarning) + ipca.fit(np.random.randn(5, 7)) + + +def test_n_components_none(): + # Ensures that n_components == None is handled correctly + rng = np.random.RandomState(1999) + for n_samples, n_features in [(50, 10), (10, 50)]: + X = rng.rand(n_samples, n_features) + ipca = IncrementalPCA(n_components=None) + + # First partial_fit call, ipca.n_components_ is inferred from + # min(X.shape) + ipca.partial_fit(X) + assert ipca.n_components_ == min(X.shape) + + # Second partial_fit call, ipca.n_components_ is inferred from + # ipca.components_ computed from the first partial_fit call + ipca.partial_fit(X) + assert ipca.n_components_ == ipca.components_.shape[0] + + +def test_incremental_pca_set_params(): + # Test that components_ sign is stable over batch sizes. 
+ rng = np.random.RandomState(1999) + n_samples = 100 + n_features = 20 + X = rng.randn(n_samples, n_features) + X2 = rng.randn(n_samples, n_features) + X3 = rng.randn(n_samples, n_features) + ipca = IncrementalPCA(n_components=20) + ipca.fit(X) + # Decreasing number of components + ipca.set_params(n_components=10) + with pytest.raises(ValueError): + ipca.partial_fit(X2) + # Increasing number of components + ipca.set_params(n_components=15) + with pytest.raises(ValueError): + ipca.partial_fit(X3) + # Returning to original setting + ipca.set_params(n_components=20) + ipca.partial_fit(X) + + +def test_incremental_pca_num_features_change(): + # Test that changing n_components will raise an error. + rng = np.random.RandomState(1999) + n_samples = 100 + X = rng.randn(n_samples, 20) + X2 = rng.randn(n_samples, 50) + ipca = IncrementalPCA(n_components=None) + ipca.fit(X) + with pytest.raises(ValueError): + ipca.partial_fit(X2) + + +def test_incremental_pca_batch_signs(): + # Test that components_ sign is stable over batch sizes. + rng = np.random.RandomState(1999) + n_samples = 100 + n_features = 3 + X = rng.randn(n_samples, n_features) + all_components = [] + batch_sizes = np.arange(10, 20) + for batch_size in batch_sizes: + ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X) + all_components.append(ipca.components_) + + for i, j in zip(all_components[:-1], all_components[1:]): + assert_almost_equal(np.sign(i), np.sign(j), decimal=6) + + +def test_incremental_pca_batch_values(): + # Test that components_ values are stable over batch sizes. 
+ rng = np.random.RandomState(1999) + n_samples = 100 + n_features = 3 + X = rng.randn(n_samples, n_features) + all_components = [] + batch_sizes = np.arange(20, 40, 3) + for batch_size in batch_sizes: + ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X) + all_components.append(ipca.components_) + + for i, j in zip(all_components[:-1], all_components[1:]): + assert_almost_equal(i, j, decimal=1) + + +def test_incremental_pca_batch_rank(): + # Test sample size in each batch is always larger or equal to n_components + rng = np.random.RandomState(1999) + n_samples = 100 + n_features = 20 + X = rng.randn(n_samples, n_features) + all_components = [] + batch_sizes = np.arange(20, 90, 3) + for batch_size in batch_sizes: + ipca = IncrementalPCA(n_components=20, batch_size=batch_size).fit(X) + all_components.append(ipca.components_) + + for components_i, components_j in zip(all_components[:-1], all_components[1:]): + assert_allclose_dense_sparse(components_i, components_j) + + +def test_incremental_pca_partial_fit(): + # Test that fit and partial_fit get equivalent results. + rng = np.random.RandomState(1999) + n, p = 50, 3 + X = rng.randn(n, p) # spherical data + X[:, 1] *= 0.00001 # make middle component relatively small + X += [5, 4, 3] # make a large mean + + # same check that we can find the original data from the transformed + # signal (since the data is almost of rank n_components) + batch_size = 10 + ipca = IncrementalPCA(n_components=2, batch_size=batch_size).fit(X) + pipca = IncrementalPCA(n_components=2, batch_size=batch_size) + # Add one to make sure endpoint is included + batch_itr = np.arange(0, n + 1, batch_size) + for i, j in zip(batch_itr[:-1], batch_itr[1:]): + pipca.partial_fit(X[i:j, :]) + assert_almost_equal(ipca.components_, pipca.components_, decimal=3) + + +def test_incremental_pca_against_pca_iris(): + # Test that IncrementalPCA and PCA are approximate (to a sign flip). 
+ X = iris.data + + Y_pca = PCA(n_components=2).fit_transform(X) + Y_ipca = IncrementalPCA(n_components=2, batch_size=25).fit_transform(X) + + assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1) + + +def test_incremental_pca_against_pca_random_data(): + # Test that IncrementalPCA and PCA are approximate (to a sign flip). + rng = np.random.RandomState(1999) + n_samples = 100 + n_features = 3 + X = rng.randn(n_samples, n_features) + 5 * rng.rand(1, n_features) + + Y_pca = PCA(n_components=3).fit_transform(X) + Y_ipca = IncrementalPCA(n_components=3, batch_size=25).fit_transform(X) + + assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1) + + +def test_explained_variances(): + # Test that PCA and IncrementalPCA calculations match + X = datasets.make_low_rank_matrix( + 1000, 100, tail_strength=0.0, effective_rank=10, random_state=1999 + ) + prec = 3 + n_samples, n_features = X.shape + for nc in [None, 99]: + pca = PCA(n_components=nc).fit(X) + ipca = IncrementalPCA(n_components=nc, batch_size=100).fit(X) + assert_almost_equal( + pca.explained_variance_, ipca.explained_variance_, decimal=prec + ) + assert_almost_equal( + pca.explained_variance_ratio_, ipca.explained_variance_ratio_, decimal=prec + ) + assert_almost_equal(pca.noise_variance_, ipca.noise_variance_, decimal=prec) + + +def test_singular_values(): + # Check that the IncrementalPCA output has the correct singular values + + rng = np.random.RandomState(0) + n_samples = 1000 + n_features = 100 + + X = datasets.make_low_rank_matrix( + n_samples, n_features, tail_strength=0.0, effective_rank=10, random_state=rng + ) + + pca = PCA(n_components=10, svd_solver="full", random_state=rng).fit(X) + ipca = IncrementalPCA(n_components=10, batch_size=100).fit(X) + assert_array_almost_equal(pca.singular_values_, ipca.singular_values_, 2) + + # Compare to the Frobenius norm + X_pca = pca.transform(X) + X_ipca = ipca.transform(X) + assert_array_almost_equal( + np.sum(pca.singular_values_**2.0), np.linalg.norm(X_pca, "fro") 
** 2.0, 12 + ) + assert_array_almost_equal( + np.sum(ipca.singular_values_**2.0), np.linalg.norm(X_ipca, "fro") ** 2.0, 2 + ) + + # Compare to the 2-norms of the score vectors + assert_array_almost_equal( + pca.singular_values_, np.sqrt(np.sum(X_pca**2.0, axis=0)), 12 + ) + assert_array_almost_equal( + ipca.singular_values_, np.sqrt(np.sum(X_ipca**2.0, axis=0)), 2 + ) + + # Set the singular values and see what we get back + rng = np.random.RandomState(0) + n_samples = 100 + n_features = 110 + + X = datasets.make_low_rank_matrix( + n_samples, n_features, tail_strength=0.0, effective_rank=3, random_state=rng + ) + + pca = PCA(n_components=3, svd_solver="full", random_state=rng) + ipca = IncrementalPCA(n_components=3, batch_size=100) + + X_pca = pca.fit_transform(X) + X_pca /= np.sqrt(np.sum(X_pca**2.0, axis=0)) + X_pca[:, 0] *= 3.142 + X_pca[:, 1] *= 2.718 + + X_hat = np.dot(X_pca, pca.components_) + pca.fit(X_hat) + ipca.fit(X_hat) + assert_array_almost_equal(pca.singular_values_, [3.142, 2.718, 1.0], 14) + assert_array_almost_equal(ipca.singular_values_, [3.142, 2.718, 1.0], 14) + + +def test_whitening(): + # Test that PCA and IncrementalPCA transforms match to sign flip. 
+ X = datasets.make_low_rank_matrix( + 1000, 10, tail_strength=0.0, effective_rank=2, random_state=1999 + ) + prec = 3 + n_samples, n_features = X.shape + for nc in [None, 9]: + pca = PCA(whiten=True, n_components=nc).fit(X) + ipca = IncrementalPCA(whiten=True, n_components=nc, batch_size=250).fit(X) + + Xt_pca = pca.transform(X) + Xt_ipca = ipca.transform(X) + assert_almost_equal(np.abs(Xt_pca), np.abs(Xt_ipca), decimal=prec) + Xinv_ipca = ipca.inverse_transform(Xt_ipca) + Xinv_pca = pca.inverse_transform(Xt_pca) + assert_almost_equal(X, Xinv_ipca, decimal=prec) + assert_almost_equal(X, Xinv_pca, decimal=prec) + assert_almost_equal(Xinv_pca, Xinv_ipca, decimal=prec) + + +def test_incremental_pca_partial_fit_float_division(): + # Test to ensure float division is used in all versions of Python + # (non-regression test for issue #9489) + + rng = np.random.RandomState(0) + A = rng.randn(5, 3) + 2 + B = rng.randn(7, 3) + 5 + + pca = IncrementalPCA(n_components=2) + pca.partial_fit(A) + # Set n_samples_seen_ to be a floating point number instead of an int + pca.n_samples_seen_ = float(pca.n_samples_seen_) + pca.partial_fit(B) + singular_vals_float_samples_seen = pca.singular_values_ + + pca2 = IncrementalPCA(n_components=2) + pca2.partial_fit(A) + pca2.partial_fit(B) + singular_vals_int_samples_seen = pca2.singular_values_ + + np.testing.assert_allclose( + singular_vals_float_samples_seen, singular_vals_int_samples_seen + ) + + +def test_incremental_pca_fit_overflow_error(): + # Test for overflow error on Windows OS + # (non-regression test for issue #17693) + rng = np.random.RandomState(0) + A = rng.rand(500000, 2) + + ipca = IncrementalPCA(n_components=2, batch_size=10000) + ipca.fit(A) + + pca = PCA(n_components=2) + pca.fit(A) + + np.testing.assert_allclose(ipca.singular_values_, pca.singular_values_) + + +def test_incremental_pca_feature_names_out(): + """Check feature names out for IncrementalPCA.""" + ipca = IncrementalPCA(n_components=2).fit(iris.data) + + names 
= ipca.get_feature_names_out() + assert_array_equal([f"incrementalpca{i}" for i in range(2)], names) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/tests/test_kernel_pca.py b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/tests/test_kernel_pca.py new file mode 100644 index 0000000000000000000000000000000000000000..b222cf4e158ff7059c6e0c43fff678d907b82fea --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/tests/test_kernel_pca.py @@ -0,0 +1,566 @@ +import warnings + +import numpy as np +import pytest + +import sklearn +from sklearn.datasets import load_iris, make_blobs, make_circles +from sklearn.decomposition import PCA, KernelPCA +from sklearn.exceptions import NotFittedError +from sklearn.linear_model import Perceptron +from sklearn.metrics.pairwise import rbf_kernel +from sklearn.model_selection import GridSearchCV +from sklearn.pipeline import Pipeline +from sklearn.preprocessing import StandardScaler +from sklearn.utils._testing import ( + assert_allclose, + assert_array_almost_equal, + assert_array_equal, +) +from sklearn.utils.fixes import CSR_CONTAINERS +from sklearn.utils.validation import _check_psd_eigenvalues + + +def test_kernel_pca(): + """Nominal test for all solvers and all known kernels + a custom one + + It tests + - that fit_transform is equivalent to fit+transform + - that the shapes of transforms and inverse transforms are correct + """ + rng = np.random.RandomState(0) + X_fit = rng.random_sample((5, 4)) + X_pred = rng.random_sample((2, 4)) + + def histogram(x, y, **kwargs): + # Histogram kernel implemented as a callable. + assert kwargs == {} # no kernel_params that we didn't ask for + return np.minimum(x, y).sum() + + for eigen_solver in ("auto", "dense", "arpack", "randomized"): + for kernel in ("linear", "rbf", "poly", histogram): + # histogram kernel produces singular matrix inside linalg.solve + # XXX use a least-squares approximation? 
+ inv = not callable(kernel) + + # transform fit data + kpca = KernelPCA( + 4, kernel=kernel, eigen_solver=eigen_solver, fit_inverse_transform=inv + ) + X_fit_transformed = kpca.fit_transform(X_fit) + X_fit_transformed2 = kpca.fit(X_fit).transform(X_fit) + assert_array_almost_equal( + np.abs(X_fit_transformed), np.abs(X_fit_transformed2) + ) + + # non-regression test: previously, gamma would be 0 by default, + # forcing all eigenvalues to 0 under the poly kernel + assert X_fit_transformed.size != 0 + + # transform new data + X_pred_transformed = kpca.transform(X_pred) + assert X_pred_transformed.shape[1] == X_fit_transformed.shape[1] + + # inverse transform + if inv: + X_pred2 = kpca.inverse_transform(X_pred_transformed) + assert X_pred2.shape == X_pred.shape + + +def test_kernel_pca_invalid_parameters(): + """Check that kPCA raises an error if the parameters are invalid + + Tests fitting inverse transform with a precomputed kernel raises a + ValueError. + """ + estimator = KernelPCA( + n_components=10, fit_inverse_transform=True, kernel="precomputed" + ) + err_ms = "Cannot fit_inverse_transform with a precomputed kernel" + with pytest.raises(ValueError, match=err_ms): + estimator.fit(np.random.randn(10, 10)) + + +def test_kernel_pca_consistent_transform(): + """Check robustness to mutations in the original training array + + Test that after fitting a kPCA model, it stays independent of any + mutation of the values of the original data object by relying on an + internal copy. 
+ """ + # X_fit_ needs to retain the old, unmodified copy of X + state = np.random.RandomState(0) + X = state.rand(10, 10) + kpca = KernelPCA(random_state=state).fit(X) + transformed1 = kpca.transform(X) + + X_copy = X.copy() + X[:, 0] = 666 + transformed2 = kpca.transform(X_copy) + assert_array_almost_equal(transformed1, transformed2) + + +def test_kernel_pca_deterministic_output(): + """Test that Kernel PCA produces deterministic output + + Tests that the same inputs and random state produce the same output. + """ + rng = np.random.RandomState(0) + X = rng.rand(10, 10) + eigen_solver = ("arpack", "dense") + + for solver in eigen_solver: + transformed_X = np.zeros((20, 2)) + for i in range(20): + kpca = KernelPCA(n_components=2, eigen_solver=solver, random_state=rng) + transformed_X[i, :] = kpca.fit_transform(X)[0] + assert_allclose(transformed_X, np.tile(transformed_X[0, :], 20).reshape(20, 2)) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_kernel_pca_sparse(csr_container): + """Test that kPCA works on a sparse data input. + + Same test as ``test_kernel_pca except inverse_transform`` since it's not + implemented for sparse matrices. 
+ """ + rng = np.random.RandomState(0) + X_fit = csr_container(rng.random_sample((5, 4))) + X_pred = csr_container(rng.random_sample((2, 4))) + + for eigen_solver in ("auto", "arpack", "randomized"): + for kernel in ("linear", "rbf", "poly"): + # transform fit data + kpca = KernelPCA( + 4, + kernel=kernel, + eigen_solver=eigen_solver, + fit_inverse_transform=False, + random_state=0, + ) + X_fit_transformed = kpca.fit_transform(X_fit) + X_fit_transformed2 = kpca.fit(X_fit).transform(X_fit) + assert_array_almost_equal( + np.abs(X_fit_transformed), np.abs(X_fit_transformed2) + ) + + # transform new data + X_pred_transformed = kpca.transform(X_pred) + assert X_pred_transformed.shape[1] == X_fit_transformed.shape[1] + + # inverse transform: not available for sparse matrices + # XXX: should we raise another exception type here? For instance: + # NotImplementedError. + with pytest.raises(NotFittedError): + kpca.inverse_transform(X_pred_transformed) + + +@pytest.mark.parametrize("solver", ["auto", "dense", "arpack", "randomized"]) +@pytest.mark.parametrize("n_features", [4, 10]) +def test_kernel_pca_linear_kernel(solver, n_features): + """Test that kPCA with linear kernel is equivalent to PCA for all solvers. + + KernelPCA with linear kernel should produce the same output as PCA. 
+ """ + rng = np.random.RandomState(0) + X_fit = rng.random_sample((5, n_features)) + X_pred = rng.random_sample((2, n_features)) + + # for a linear kernel, kernel PCA should find the same projection as PCA + # modulo the sign (direction) + # fit only the first four components: fifth is near zero eigenvalue, so + # can be trimmed due to roundoff error + n_comps = 3 if solver == "arpack" else 4 + assert_array_almost_equal( + np.abs(KernelPCA(n_comps, eigen_solver=solver).fit(X_fit).transform(X_pred)), + np.abs( + PCA(n_comps, svd_solver=solver if solver != "dense" else "full") + .fit(X_fit) + .transform(X_pred) + ), + ) + + +def test_kernel_pca_n_components(): + """Test that `n_components` is correctly taken into account for projections + + For all solvers this tests that the output has the correct shape depending + on the selected number of components. + """ + rng = np.random.RandomState(0) + X_fit = rng.random_sample((5, 4)) + X_pred = rng.random_sample((2, 4)) + + for eigen_solver in ("dense", "arpack", "randomized"): + for c in [1, 2, 4]: + kpca = KernelPCA(n_components=c, eigen_solver=eigen_solver) + shape = kpca.fit(X_fit).transform(X_pred).shape + + assert shape == (2, c) + + +def test_remove_zero_eig(): + """Check that the ``remove_zero_eig`` parameter works correctly. + + Tests that the null-space (Zero) eigenvalues are removed when + remove_zero_eig=True, whereas they are not by default. 
+ """ + X = np.array([[1 - 1e-30, 1], [1, 1], [1, 1 - 1e-20]]) + + # n_components=None (default) => remove_zero_eig is True + kpca = KernelPCA() + Xt = kpca.fit_transform(X) + assert Xt.shape == (3, 0) + + kpca = KernelPCA(n_components=2) + Xt = kpca.fit_transform(X) + assert Xt.shape == (3, 2) + + kpca = KernelPCA(n_components=2, remove_zero_eig=True) + Xt = kpca.fit_transform(X) + assert Xt.shape == (3, 0) + + +def test_leave_zero_eig(): + """Non-regression test for issue #12141 (PR #12143) + + This test checks that fit().transform() returns the same result as + fit_transform() in case of non-removed zero eigenvalue. + """ + X_fit = np.array([[1, 1], [0, 0]]) + + # Assert that even with all np warnings on, there is no div by zero warning + with warnings.catch_warnings(): + # There might be warnings about the kernel being badly conditioned, + # but there should not be warnings about division by zero. + # (Numpy division by zero warning can have many message variants, but + # at least we know that it is a RuntimeWarning so lets check only this) + warnings.simplefilter("error", RuntimeWarning) + with np.errstate(all="warn"): + k = KernelPCA(n_components=2, remove_zero_eig=False, eigen_solver="dense") + # Fit, then transform + A = k.fit(X_fit).transform(X_fit) + # Do both at once + B = k.fit_transform(X_fit) + # Compare + assert_array_almost_equal(np.abs(A), np.abs(B)) + + +def test_kernel_pca_precomputed(): + """Test that kPCA works with a precomputed kernel, for all solvers""" + rng = np.random.RandomState(0) + X_fit = rng.random_sample((5, 4)) + X_pred = rng.random_sample((2, 4)) + + for eigen_solver in ("dense", "arpack", "randomized"): + X_kpca = ( + KernelPCA(4, eigen_solver=eigen_solver, random_state=0) + .fit(X_fit) + .transform(X_pred) + ) + + X_kpca2 = ( + KernelPCA( + 4, eigen_solver=eigen_solver, kernel="precomputed", random_state=0 + ) + .fit(np.dot(X_fit, X_fit.T)) + .transform(np.dot(X_pred, X_fit.T)) + ) + + X_kpca_train = KernelPCA( + 4, 
eigen_solver=eigen_solver, kernel="precomputed", random_state=0 + ).fit_transform(np.dot(X_fit, X_fit.T)) + + X_kpca_train2 = ( + KernelPCA( + 4, eigen_solver=eigen_solver, kernel="precomputed", random_state=0 + ) + .fit(np.dot(X_fit, X_fit.T)) + .transform(np.dot(X_fit, X_fit.T)) + ) + + assert_array_almost_equal(np.abs(X_kpca), np.abs(X_kpca2)) + + assert_array_almost_equal(np.abs(X_kpca_train), np.abs(X_kpca_train2)) + + +@pytest.mark.parametrize("solver", ["auto", "dense", "arpack", "randomized"]) +def test_kernel_pca_precomputed_non_symmetric(solver): + """Check that the kernel centerer works. + + Tests that a non symmetric precomputed kernel is actually accepted + because the kernel centerer does its job correctly. + """ + + # a non symmetric gram matrix + K = [[1, 2], [3, 40]] + kpca = KernelPCA( + kernel="precomputed", eigen_solver=solver, n_components=1, random_state=0 + ) + kpca.fit(K) # no error + + # same test with centered kernel + Kc = [[9, -9], [-9, 9]] + kpca_c = KernelPCA( + kernel="precomputed", eigen_solver=solver, n_components=1, random_state=0 + ) + kpca_c.fit(Kc) + + # comparison between the non-centered and centered versions + assert_array_equal(kpca.eigenvectors_, kpca_c.eigenvectors_) + assert_array_equal(kpca.eigenvalues_, kpca_c.eigenvalues_) + + +def test_gridsearch_pipeline(): + """Check that kPCA works as expected in a grid search pipeline + + Test if we can do a grid-search to find parameters to separate + circles with a perceptron model. 
+ """ + X, y = make_circles(n_samples=400, factor=0.3, noise=0.05, random_state=0) + kpca = KernelPCA(kernel="rbf", n_components=2) + pipeline = Pipeline([("kernel_pca", kpca), ("Perceptron", Perceptron(max_iter=5))]) + param_grid = dict(kernel_pca__gamma=2.0 ** np.arange(-2, 2)) + grid_search = GridSearchCV(pipeline, cv=3, param_grid=param_grid) + grid_search.fit(X, y) + assert grid_search.best_score_ == 1 + + +def test_gridsearch_pipeline_precomputed(): + """Check that kPCA works as expected in a grid search pipeline (2) + + Test if we can do a grid-search to find parameters to separate + circles with a perceptron model. This test uses a precomputed kernel. + """ + X, y = make_circles(n_samples=400, factor=0.3, noise=0.05, random_state=0) + kpca = KernelPCA(kernel="precomputed", n_components=2) + pipeline = Pipeline([("kernel_pca", kpca), ("Perceptron", Perceptron(max_iter=5))]) + param_grid = dict(Perceptron__max_iter=np.arange(1, 5)) + grid_search = GridSearchCV(pipeline, cv=3, param_grid=param_grid) + X_kernel = rbf_kernel(X, gamma=2.0) + grid_search.fit(X_kernel, y) + assert grid_search.best_score_ == 1 + + +def test_nested_circles(): + """Check that kPCA projects in a space where nested circles are separable + + Tests that 2D nested circles become separable with a perceptron when + projected in the first 2 kPCA using an RBF kernel, while raw samples + are not directly separable in the original space. + """ + X, y = make_circles(n_samples=400, factor=0.3, noise=0.05, random_state=0) + + # 2D nested circles are not linearly separable + train_score = Perceptron(max_iter=5).fit(X, y).score(X, y) + assert train_score < 0.8 + + # Project the circles data into the first 2 components of a RBF Kernel + # PCA model. + # Note that the gamma value is data dependent. If this test breaks + # and the gamma value has to be updated, the Kernel PCA example will + # have to be updated too. 
+ kpca = KernelPCA( + kernel="rbf", n_components=2, fit_inverse_transform=True, gamma=2.0 + ) + X_kpca = kpca.fit_transform(X) + + # The data is perfectly linearly separable in that space + train_score = Perceptron(max_iter=5).fit(X_kpca, y).score(X_kpca, y) + assert train_score == 1.0 + + +def test_kernel_conditioning(): + """Check that ``_check_psd_eigenvalues`` is correctly called in kPCA + + Non-regression test for issue #12140 (PR #12145). + """ + + # create a pathological X leading to small non-zero eigenvalue + X = [[5, 1], [5 + 1e-8, 1e-8], [5 + 1e-8, 0]] + kpca = KernelPCA(kernel="linear", n_components=2, fit_inverse_transform=True) + kpca.fit(X) + + # check that the small non-zero eigenvalue was correctly set to zero + assert kpca.eigenvalues_.min() == 0 + assert np.all(kpca.eigenvalues_ == _check_psd_eigenvalues(kpca.eigenvalues_)) + + +@pytest.mark.parametrize("solver", ["auto", "dense", "arpack", "randomized"]) +def test_precomputed_kernel_not_psd(solver): + """Check how KernelPCA works with non-PSD kernels depending on n_components + + Tests for all methods what happens with a non PSD gram matrix (this + can happen in an isomap scenario, or with custom kernel functions, or + maybe with ill-posed datasets). + + When ``n_component`` is large enough to capture a negative eigenvalue, an + error should be raised. Otherwise, KernelPCA should run without error + since the negative eigenvalues are not selected. 
+ """ + + # a non PSD kernel with large eigenvalues, already centered + # it was captured from an isomap call and multiplied by 100 for compacity + K = [ + [4.48, -1.0, 8.07, 2.33, 2.33, 2.33, -5.76, -12.78], + [-1.0, -6.48, 4.5, -1.24, -1.24, -1.24, -0.81, 7.49], + [8.07, 4.5, 15.48, 2.09, 2.09, 2.09, -11.1, -23.23], + [2.33, -1.24, 2.09, 4.0, -3.65, -3.65, 1.02, -0.9], + [2.33, -1.24, 2.09, -3.65, 4.0, -3.65, 1.02, -0.9], + [2.33, -1.24, 2.09, -3.65, -3.65, 4.0, 1.02, -0.9], + [-5.76, -0.81, -11.1, 1.02, 1.02, 1.02, 4.86, 9.75], + [-12.78, 7.49, -23.23, -0.9, -0.9, -0.9, 9.75, 21.46], + ] + # this gram matrix has 5 positive eigenvalues and 3 negative ones + # [ 52.72, 7.65, 7.65, 5.02, 0. , -0. , -6.13, -15.11] + + # 1. ask for enough components to get a significant negative one + kpca = KernelPCA(kernel="precomputed", eigen_solver=solver, n_components=7) + # make sure that the appropriate error is raised + with pytest.raises(ValueError, match="There are significant negative eigenvalues"): + kpca.fit(K) + + # 2. ask for a small enough n_components to get only positive ones + kpca = KernelPCA(kernel="precomputed", eigen_solver=solver, n_components=2) + if solver == "randomized": + # the randomized method is still inconsistent with the others on this + # since it selects the eigenvalues based on the largest 2 modules, not + # on the largest 2 values. 
+ # + # At least we can ensure that we return an error instead of returning + # the wrong eigenvalues + with pytest.raises( + ValueError, match="There are significant negative eigenvalues" + ): + kpca.fit(K) + else: + # general case: make sure that it works + kpca.fit(K) + + +@pytest.mark.parametrize("n_components", [4, 10, 20]) +def test_kernel_pca_solvers_equivalence(n_components): + """Check that 'dense' 'arpack' & 'randomized' solvers give similar results""" + + # Generate random data + n_train, n_test = 1_000, 100 + X, _ = make_circles( + n_samples=(n_train + n_test), factor=0.3, noise=0.05, random_state=0 + ) + X_fit, X_pred = X[:n_train, :], X[n_train:, :] + + # reference (full) + ref_pred = ( + KernelPCA(n_components, eigen_solver="dense", random_state=0) + .fit(X_fit) + .transform(X_pred) + ) + + # arpack + a_pred = ( + KernelPCA(n_components, eigen_solver="arpack", random_state=0) + .fit(X_fit) + .transform(X_pred) + ) + # check that the result is still correct despite the approx + assert_array_almost_equal(np.abs(a_pred), np.abs(ref_pred)) + + # randomized + r_pred = ( + KernelPCA(n_components, eigen_solver="randomized", random_state=0) + .fit(X_fit) + .transform(X_pred) + ) + # check that the result is still correct despite the approximation + assert_array_almost_equal(np.abs(r_pred), np.abs(ref_pred)) + + +def test_kernel_pca_inverse_transform_reconstruction(): + """Test if the reconstruction is a good approximation. + + Note that in general it is not possible to get an arbitrarily good + reconstruction because of kernel centering that does not + preserve all the information of the original data. 
+ """ + X, *_ = make_blobs(n_samples=100, n_features=4, random_state=0) + + kpca = KernelPCA( + n_components=20, kernel="rbf", fit_inverse_transform=True, alpha=1e-3 + ) + X_trans = kpca.fit_transform(X) + X_reconst = kpca.inverse_transform(X_trans) + assert np.linalg.norm(X - X_reconst) / np.linalg.norm(X) < 1e-1 + + +def test_kernel_pca_raise_not_fitted_error(): + X = np.random.randn(15).reshape(5, 3) + kpca = KernelPCA() + kpca.fit(X) + with pytest.raises(NotFittedError): + kpca.inverse_transform(X) + + +def test_32_64_decomposition_shape(): + """Test that the decomposition is similar for 32 and 64 bits data + + Non regression test for + https://github.com/scikit-learn/scikit-learn/issues/18146 + """ + X, y = make_blobs( + n_samples=30, centers=[[0, 0, 0], [1, 1, 1]], random_state=0, cluster_std=0.1 + ) + X = StandardScaler().fit_transform(X) + X -= X.min() + + # Compare the shapes (corresponds to the number of non-zero eigenvalues) + kpca = KernelPCA() + assert kpca.fit_transform(X).shape == kpca.fit_transform(X.astype(np.float32)).shape + + +def test_kernel_pca_feature_names_out(): + """Check feature names out for KernelPCA.""" + X, *_ = make_blobs(n_samples=100, n_features=4, random_state=0) + kpca = KernelPCA(n_components=2).fit(X) + + names = kpca.get_feature_names_out() + assert_array_equal([f"kernelpca{i}" for i in range(2)], names) + + +def test_kernel_pca_inverse_correct_gamma(): + """Check that gamma is set correctly when not provided. 
+ + Non-regression test for #26280 + """ + rng = np.random.RandomState(0) + X = rng.random_sample((5, 4)) + + kwargs = { + "n_components": 2, + "random_state": rng, + "fit_inverse_transform": True, + "kernel": "rbf", + } + + expected_gamma = 1 / X.shape[1] + kpca1 = KernelPCA(gamma=None, **kwargs).fit(X) + kpca2 = KernelPCA(gamma=expected_gamma, **kwargs).fit(X) + + assert kpca1.gamma_ == expected_gamma + assert kpca2.gamma_ == expected_gamma + + X1_recon = kpca1.inverse_transform(kpca1.transform(X)) + X2_recon = kpca2.inverse_transform(kpca1.transform(X)) + + assert_allclose(X1_recon, X2_recon) + + +def test_kernel_pca_pandas_output(): + """Check that KernelPCA works with pandas output when the solver is arpack. + + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/27579 + """ + pytest.importorskip("pandas") + X, _ = load_iris(as_frame=True, return_X_y=True) + with sklearn.config_context(transform_output="pandas"): + KernelPCA(n_components=2, eigen_solver="arpack").fit_transform(X) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/tests/test_online_lda.py b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/tests/test_online_lda.py new file mode 100644 index 0000000000000000000000000000000000000000..d442d0beeb57394b276ae4de9f683886d982f29e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/tests/test_online_lda.py @@ -0,0 +1,477 @@ +import sys +from io import StringIO + +import numpy as np +import pytest +from numpy.testing import assert_array_equal +from scipy.linalg import block_diag +from scipy.special import psi + +from sklearn.decomposition import LatentDirichletAllocation +from sklearn.decomposition._online_lda_fast import ( + _dirichlet_expectation_1d, + _dirichlet_expectation_2d, +) +from sklearn.exceptions import NotFittedError +from sklearn.utils._testing import ( + assert_allclose, + assert_almost_equal, + assert_array_almost_equal, + 
if_safe_multiprocessing_with_blas, +) +from sklearn.utils.fixes import CSR_CONTAINERS + + +def _build_sparse_array(csr_container): + # Create 3 topics and each topic has 3 distinct words. + # (Each word only belongs to a single topic.) + n_components = 3 + block = np.full((3, 3), n_components, dtype=int) + blocks = [block] * n_components + X = block_diag(*blocks) + X = csr_container(X) + return (n_components, X) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_lda_default_prior_params(csr_container): + # default prior parameter should be `1 / topics` + # and verbose params should not affect result + n_components, X = _build_sparse_array(csr_container) + prior = 1.0 / n_components + lda_1 = LatentDirichletAllocation( + n_components=n_components, + doc_topic_prior=prior, + topic_word_prior=prior, + random_state=0, + ) + lda_2 = LatentDirichletAllocation(n_components=n_components, random_state=0) + topic_distr_1 = lda_1.fit_transform(X) + topic_distr_2 = lda_2.fit_transform(X) + assert_almost_equal(topic_distr_1, topic_distr_2) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_lda_fit_batch(csr_container): + # Test LDA batch learning_offset (`fit` method with 'batch' learning) + rng = np.random.RandomState(0) + n_components, X = _build_sparse_array(csr_container) + lda = LatentDirichletAllocation( + n_components=n_components, + evaluate_every=1, + learning_method="batch", + random_state=rng, + ) + lda.fit(X) + + correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)] + for component in lda.components_: + # Find top 3 words in each LDA component + top_idx = set(component.argsort()[-3:][::-1]) + assert tuple(sorted(top_idx)) in correct_idx_grps + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_lda_fit_online(csr_container): + # Test LDA online learning (`fit` method with 'online' learning) + rng = np.random.RandomState(0) + n_components, X = _build_sparse_array(csr_container) + lda = 
LatentDirichletAllocation( + n_components=n_components, + learning_offset=10.0, + evaluate_every=1, + learning_method="online", + random_state=rng, + ) + lda.fit(X) + + correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)] + for component in lda.components_: + # Find top 3 words in each LDA component + top_idx = set(component.argsort()[-3:][::-1]) + assert tuple(sorted(top_idx)) in correct_idx_grps + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_lda_partial_fit(csr_container): + # Test LDA online learning (`partial_fit` method) + # (same as test_lda_batch) + rng = np.random.RandomState(0) + n_components, X = _build_sparse_array(csr_container) + lda = LatentDirichletAllocation( + n_components=n_components, + learning_offset=10.0, + total_samples=100, + random_state=rng, + ) + for i in range(3): + lda.partial_fit(X) + + correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)] + for c in lda.components_: + top_idx = set(c.argsort()[-3:][::-1]) + assert tuple(sorted(top_idx)) in correct_idx_grps + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_lda_dense_input(csr_container): + # Test LDA with dense input. + rng = np.random.RandomState(0) + n_components, X = _build_sparse_array(csr_container) + lda = LatentDirichletAllocation( + n_components=n_components, learning_method="batch", random_state=rng + ) + lda.fit(X.toarray()) + + correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)] + for component in lda.components_: + # Find top 3 words in each LDA component + top_idx = set(component.argsort()[-3:][::-1]) + assert tuple(sorted(top_idx)) in correct_idx_grps + + +def test_lda_transform(): + # Test LDA transform. 
    # Transform result cannot be negative and should be normalized
    rng = np.random.RandomState(0)
    X = rng.randint(5, size=(20, 10))
    n_components = 3
    lda = LatentDirichletAllocation(n_components=n_components, random_state=rng)
    X_trans = lda.fit_transform(X)
    # NOTE(review): the comment above says "cannot be negative", but `.any()`
    # only checks that at least one entry is positive; `.all()` would match
    # the stated intent -- confirm upstream intent before tightening.
    assert (X_trans > 0.0).any()
    # each document's topic distribution must sum to 1
    assert_array_almost_equal(np.sum(X_trans, axis=1), np.ones(X_trans.shape[0]))


@pytest.mark.parametrize("method", ("online", "batch"))
def test_lda_fit_transform(method):
    # Test LDA fit_transform & transform
    # fit_transform and transform result should be the same
    rng = np.random.RandomState(0)
    X = rng.randint(10, size=(50, 20))
    lda = LatentDirichletAllocation(
        n_components=5, learning_method=method, random_state=rng
    )
    X_fit = lda.fit_transform(X)
    X_trans = lda.transform(X)
    assert_array_almost_equal(X_fit, X_trans, 4)


def test_lda_negative_input():
    # fitting a dense matrix that contains negative values must raise
    X = np.full((5, 10), -1.0)
    lda = LatentDirichletAllocation()
    regex = r"^Negative values in data passed"
    with pytest.raises(ValueError, match=regex):
        lda.fit(X)


def test_lda_no_component_error():
    # calling `perplexity` before `fit` must raise NotFittedError
    rng = np.random.RandomState(0)
    X = rng.randint(4, size=(20, 10))
    lda = LatentDirichletAllocation()
    regex = (
        "This LatentDirichletAllocation instance is not fitted yet. "
        "Call 'fit' with appropriate arguments before using this "
        "estimator."
+ ) + with pytest.raises(NotFittedError, match=regex): + lda.perplexity(X) + + +@if_safe_multiprocessing_with_blas +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +@pytest.mark.parametrize("method", ("online", "batch")) +def test_lda_multi_jobs(method, csr_container): + n_components, X = _build_sparse_array(csr_container) + # Test LDA batch training with multi CPU + rng = np.random.RandomState(0) + lda = LatentDirichletAllocation( + n_components=n_components, + n_jobs=2, + learning_method=method, + evaluate_every=1, + random_state=rng, + ) + lda.fit(X) + + correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)] + for c in lda.components_: + top_idx = set(c.argsort()[-3:][::-1]) + assert tuple(sorted(top_idx)) in correct_idx_grps + + +@if_safe_multiprocessing_with_blas +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_lda_partial_fit_multi_jobs(csr_container): + # Test LDA online training with multi CPU + rng = np.random.RandomState(0) + n_components, X = _build_sparse_array(csr_container) + lda = LatentDirichletAllocation( + n_components=n_components, + n_jobs=2, + learning_offset=5.0, + total_samples=30, + random_state=rng, + ) + for i in range(2): + lda.partial_fit(X) + + correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)] + for c in lda.components_: + top_idx = set(c.argsort()[-3:][::-1]) + assert tuple(sorted(top_idx)) in correct_idx_grps + + +def test_lda_preplexity_mismatch(): + # test dimension mismatch in `perplexity` method + rng = np.random.RandomState(0) + n_components = rng.randint(3, 6) + n_samples = rng.randint(6, 10) + X = np.random.randint(4, size=(n_samples, 10)) + lda = LatentDirichletAllocation( + n_components=n_components, + learning_offset=5.0, + total_samples=20, + random_state=rng, + ) + lda.fit(X) + # invalid samples + invalid_n_samples = rng.randint(4, size=(n_samples + 1, n_components)) + with pytest.raises(ValueError, match=r"Number of samples"): + lda._perplexity_precomp_distr(X, invalid_n_samples) + # invalid 
topic number + invalid_n_components = rng.randint(4, size=(n_samples, n_components + 1)) + with pytest.raises(ValueError, match=r"Number of topics"): + lda._perplexity_precomp_distr(X, invalid_n_components) + + +@pytest.mark.parametrize("method", ("online", "batch")) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_lda_perplexity(method, csr_container): + # Test LDA perplexity for batch training + # perplexity should be lower after each iteration + n_components, X = _build_sparse_array(csr_container) + lda_1 = LatentDirichletAllocation( + n_components=n_components, + max_iter=1, + learning_method=method, + total_samples=100, + random_state=0, + ) + lda_2 = LatentDirichletAllocation( + n_components=n_components, + max_iter=10, + learning_method=method, + total_samples=100, + random_state=0, + ) + lda_1.fit(X) + perp_1 = lda_1.perplexity(X, sub_sampling=False) + + lda_2.fit(X) + perp_2 = lda_2.perplexity(X, sub_sampling=False) + assert perp_1 >= perp_2 + + perp_1_subsampling = lda_1.perplexity(X, sub_sampling=True) + perp_2_subsampling = lda_2.perplexity(X, sub_sampling=True) + assert perp_1_subsampling >= perp_2_subsampling + + +@pytest.mark.parametrize("method", ("online", "batch")) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_lda_score(method, csr_container): + # Test LDA score for batch training + # score should be higher after each iteration + n_components, X = _build_sparse_array(csr_container) + lda_1 = LatentDirichletAllocation( + n_components=n_components, + max_iter=1, + learning_method=method, + total_samples=100, + random_state=0, + ) + lda_2 = LatentDirichletAllocation( + n_components=n_components, + max_iter=10, + learning_method=method, + total_samples=100, + random_state=0, + ) + lda_1.fit_transform(X) + score_1 = lda_1.score(X) + + lda_2.fit_transform(X) + score_2 = lda_2.score(X) + assert score_2 >= score_1 + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def 
test_perplexity_input_format(csr_container): + # Test LDA perplexity for sparse and dense input + # score should be the same for both dense and sparse input + n_components, X = _build_sparse_array(csr_container) + lda = LatentDirichletAllocation( + n_components=n_components, + max_iter=1, + learning_method="batch", + total_samples=100, + random_state=0, + ) + lda.fit(X) + perp_1 = lda.perplexity(X) + perp_2 = lda.perplexity(X.toarray()) + assert_almost_equal(perp_1, perp_2) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_lda_score_perplexity(csr_container): + # Test the relationship between LDA score and perplexity + n_components, X = _build_sparse_array(csr_container) + lda = LatentDirichletAllocation( + n_components=n_components, max_iter=10, random_state=0 + ) + lda.fit(X) + perplexity_1 = lda.perplexity(X, sub_sampling=False) + + score = lda.score(X) + perplexity_2 = np.exp(-1.0 * (score / np.sum(X.data))) + assert_almost_equal(perplexity_1, perplexity_2) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_lda_fit_perplexity(csr_container): + # Test that the perplexity computed during fit is consistent with what is + # returned by the perplexity method + n_components, X = _build_sparse_array(csr_container) + lda = LatentDirichletAllocation( + n_components=n_components, + max_iter=1, + learning_method="batch", + random_state=0, + evaluate_every=1, + ) + lda.fit(X) + + # Perplexity computed at end of fit method + perplexity1 = lda.bound_ + + # Result of perplexity method on the train set + perplexity2 = lda.perplexity(X) + + assert_almost_equal(perplexity1, perplexity2) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_lda_empty_docs(csr_container): + """Test LDA on empty document (all-zero rows).""" + Z = np.zeros((5, 4)) + for X in [Z, csr_container(Z)]: + lda = LatentDirichletAllocation(max_iter=750).fit(X) + assert_almost_equal( + lda.components_.sum(axis=0), 
np.ones(lda.components_.shape[1]) + ) + + +def test_dirichlet_expectation(): + """Test Cython version of Dirichlet expectation calculation.""" + x = np.logspace(-100, 10, 10000) + expectation = np.empty_like(x) + _dirichlet_expectation_1d(x, 0, expectation) + assert_allclose(expectation, np.exp(psi(x) - psi(np.sum(x))), atol=1e-19) + + x = x.reshape(100, 100) + assert_allclose( + _dirichlet_expectation_2d(x), + psi(x) - psi(np.sum(x, axis=1)[:, np.newaxis]), + rtol=1e-11, + atol=3e-9, + ) + + +def check_verbosity( + verbose, evaluate_every, expected_lines, expected_perplexities, csr_container +): + n_components, X = _build_sparse_array(csr_container) + lda = LatentDirichletAllocation( + n_components=n_components, + max_iter=3, + learning_method="batch", + verbose=verbose, + evaluate_every=evaluate_every, + random_state=0, + ) + out = StringIO() + old_out, sys.stdout = sys.stdout, out + try: + lda.fit(X) + finally: + sys.stdout = old_out + + n_lines = out.getvalue().count("\n") + n_perplexity = out.getvalue().count("perplexity") + assert expected_lines == n_lines + assert expected_perplexities == n_perplexity + + +@pytest.mark.parametrize( + "verbose,evaluate_every,expected_lines,expected_perplexities", + [ + (False, 1, 0, 0), + (False, 0, 0, 0), + (True, 0, 3, 0), + (True, 1, 3, 3), + (True, 2, 3, 1), + ], +) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_verbosity( + verbose, evaluate_every, expected_lines, expected_perplexities, csr_container +): + check_verbosity( + verbose, evaluate_every, expected_lines, expected_perplexities, csr_container + ) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_lda_feature_names_out(csr_container): + """Check feature names out for LatentDirichletAllocation.""" + n_components, X = _build_sparse_array(csr_container) + lda = LatentDirichletAllocation(n_components=n_components).fit(X) + + names = lda.get_feature_names_out() + assert_array_equal( + [f"latentdirichletallocation{i}" for i 
in range(n_components)], names + ) + + +@pytest.mark.parametrize("learning_method", ("batch", "online")) +def test_lda_dtype_match(learning_method, global_dtype): + """Check data type preservation of fitted attributes.""" + rng = np.random.RandomState(0) + X = rng.uniform(size=(20, 10)).astype(global_dtype, copy=False) + + lda = LatentDirichletAllocation( + n_components=5, random_state=0, learning_method=learning_method + ) + lda.fit(X) + assert lda.components_.dtype == global_dtype + assert lda.exp_dirichlet_component_.dtype == global_dtype + + +@pytest.mark.parametrize("learning_method", ("batch", "online")) +def test_lda_numerical_consistency(learning_method, global_random_seed): + """Check numerical consistency between np.float32 and np.float64.""" + rng = np.random.RandomState(global_random_seed) + X64 = rng.uniform(size=(20, 10)) + X32 = X64.astype(np.float32) + + lda_64 = LatentDirichletAllocation( + n_components=5, random_state=global_random_seed, learning_method=learning_method + ).fit(X64) + lda_32 = LatentDirichletAllocation( + n_components=5, random_state=global_random_seed, learning_method=learning_method + ).fit(X32) + + assert_allclose(lda_32.components_, lda_64.components_) + assert_allclose(lda_32.transform(X32), lda_64.transform(X64)) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/tests/test_sparse_pca.py b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/tests/test_sparse_pca.py new file mode 100644 index 0000000000000000000000000000000000000000..3797970e3d6badc0a9537f410ae04cb24958bcf7 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/tests/test_sparse_pca.py @@ -0,0 +1,367 @@ +# Author: Vlad Niculae +# License: BSD 3 clause + +import sys + +import numpy as np +import pytest +from numpy.testing import assert_array_equal + +from sklearn.decomposition import PCA, MiniBatchSparsePCA, SparsePCA +from sklearn.utils import check_random_state +from sklearn.utils._testing import 
( + assert_allclose, + assert_array_almost_equal, + if_safe_multiprocessing_with_blas, +) + + +def generate_toy_data(n_components, n_samples, image_size, random_state=None): + n_features = image_size[0] * image_size[1] + + rng = check_random_state(random_state) + U = rng.randn(n_samples, n_components) + V = rng.randn(n_components, n_features) + + centers = [(3, 3), (6, 7), (8, 1)] + sz = [1, 2, 1] + for k in range(n_components): + img = np.zeros(image_size) + xmin, xmax = centers[k][0] - sz[k], centers[k][0] + sz[k] + ymin, ymax = centers[k][1] - sz[k], centers[k][1] + sz[k] + img[xmin:xmax][:, ymin:ymax] = 1.0 + V[k, :] = img.ravel() + + # Y is defined by : Y = UV + noise + Y = np.dot(U, V) + Y += 0.1 * rng.randn(Y.shape[0], Y.shape[1]) # Add noise + return Y, U, V + + +# SparsePCA can be a bit slow. To avoid having test times go up, we +# test different aspects of the code in the same test + + +def test_correct_shapes(): + rng = np.random.RandomState(0) + X = rng.randn(12, 10) + spca = SparsePCA(n_components=8, random_state=rng) + U = spca.fit_transform(X) + assert spca.components_.shape == (8, 10) + assert U.shape == (12, 8) + # test overcomplete decomposition + spca = SparsePCA(n_components=13, random_state=rng) + U = spca.fit_transform(X) + assert spca.components_.shape == (13, 10) + assert U.shape == (12, 13) + + +def test_fit_transform(): + alpha = 1 + rng = np.random.RandomState(0) + Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array + spca_lars = SparsePCA(n_components=3, method="lars", alpha=alpha, random_state=0) + spca_lars.fit(Y) + + # Test that CD gives similar results + spca_lasso = SparsePCA(n_components=3, method="cd", random_state=0, alpha=alpha) + spca_lasso.fit(Y) + assert_array_almost_equal(spca_lasso.components_, spca_lars.components_) + + +@if_safe_multiprocessing_with_blas +def test_fit_transform_parallel(): + alpha = 1 + rng = np.random.RandomState(0) + Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # 
wide array + spca_lars = SparsePCA(n_components=3, method="lars", alpha=alpha, random_state=0) + spca_lars.fit(Y) + U1 = spca_lars.transform(Y) + # Test multiple CPUs + spca = SparsePCA( + n_components=3, n_jobs=2, method="lars", alpha=alpha, random_state=0 + ).fit(Y) + U2 = spca.transform(Y) + assert not np.all(spca_lars.components_ == 0) + assert_array_almost_equal(U1, U2) + + +def test_transform_nan(): + # Test that SparsePCA won't return NaN when there is 0 feature in all + # samples. + rng = np.random.RandomState(0) + Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array + Y[:, 0] = 0 + estimator = SparsePCA(n_components=8) + assert not np.any(np.isnan(estimator.fit_transform(Y))) + + +def test_fit_transform_tall(): + rng = np.random.RandomState(0) + Y, _, _ = generate_toy_data(3, 65, (8, 8), random_state=rng) # tall array + spca_lars = SparsePCA(n_components=3, method="lars", random_state=rng) + U1 = spca_lars.fit_transform(Y) + spca_lasso = SparsePCA(n_components=3, method="cd", random_state=rng) + U2 = spca_lasso.fit(Y).transform(Y) + assert_array_almost_equal(U1, U2) + + +def test_initialization(): + rng = np.random.RandomState(0) + U_init = rng.randn(5, 3) + V_init = rng.randn(3, 4) + model = SparsePCA( + n_components=3, U_init=U_init, V_init=V_init, max_iter=0, random_state=rng + ) + model.fit(rng.randn(5, 4)) + assert_allclose(model.components_, V_init / np.linalg.norm(V_init, axis=1)[:, None]) + + +def test_mini_batch_correct_shapes(): + rng = np.random.RandomState(0) + X = rng.randn(12, 10) + pca = MiniBatchSparsePCA(n_components=8, max_iter=1, random_state=rng) + U = pca.fit_transform(X) + assert pca.components_.shape == (8, 10) + assert U.shape == (12, 8) + # test overcomplete decomposition + pca = MiniBatchSparsePCA(n_components=13, max_iter=1, random_state=rng) + U = pca.fit_transform(X) + assert pca.components_.shape == (13, 10) + assert U.shape == (12, 13) + + +# XXX: test always skipped +@pytest.mark.skipif(True, 
reason="skipping mini_batch_fit_transform.") +def test_mini_batch_fit_transform(): + alpha = 1 + rng = np.random.RandomState(0) + Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array + spca_lars = MiniBatchSparsePCA(n_components=3, random_state=0, alpha=alpha).fit(Y) + U1 = spca_lars.transform(Y) + # Test multiple CPUs + if sys.platform == "win32": # fake parallelism for win32 + import joblib + + _mp = joblib.parallel.multiprocessing + joblib.parallel.multiprocessing = None + try: + spca = MiniBatchSparsePCA( + n_components=3, n_jobs=2, alpha=alpha, random_state=0 + ) + U2 = spca.fit(Y).transform(Y) + finally: + joblib.parallel.multiprocessing = _mp + else: # we can efficiently use parallelism + spca = MiniBatchSparsePCA(n_components=3, n_jobs=2, alpha=alpha, random_state=0) + U2 = spca.fit(Y).transform(Y) + assert not np.all(spca_lars.components_ == 0) + assert_array_almost_equal(U1, U2) + # Test that CD gives similar results + spca_lasso = MiniBatchSparsePCA( + n_components=3, method="cd", alpha=alpha, random_state=0 + ).fit(Y) + assert_array_almost_equal(spca_lasso.components_, spca_lars.components_) + + +def test_scaling_fit_transform(): + alpha = 1 + rng = np.random.RandomState(0) + Y, _, _ = generate_toy_data(3, 1000, (8, 8), random_state=rng) + spca_lars = SparsePCA(n_components=3, method="lars", alpha=alpha, random_state=rng) + results_train = spca_lars.fit_transform(Y) + results_test = spca_lars.transform(Y[:10]) + assert_allclose(results_train[0], results_test[0]) + + +def test_pca_vs_spca(): + rng = np.random.RandomState(0) + Y, _, _ = generate_toy_data(3, 1000, (8, 8), random_state=rng) + Z, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) + spca = SparsePCA(alpha=0, ridge_alpha=0, n_components=2) + pca = PCA(n_components=2) + pca.fit(Y) + spca.fit(Y) + results_test_pca = pca.transform(Z) + results_test_spca = spca.transform(Z) + assert_allclose( + np.abs(spca.components_.dot(pca.components_.T)), np.eye(2), atol=1e-5 + ) + 
results_test_pca *= np.sign(results_test_pca[0, :]) + results_test_spca *= np.sign(results_test_spca[0, :]) + assert_allclose(results_test_pca, results_test_spca) + + +@pytest.mark.parametrize("SPCA", [SparsePCA, MiniBatchSparsePCA]) +@pytest.mark.parametrize("n_components", [None, 3]) +def test_spca_n_components_(SPCA, n_components): + rng = np.random.RandomState(0) + n_samples, n_features = 12, 10 + X = rng.randn(n_samples, n_features) + + model = SPCA(n_components=n_components).fit(X) + + if n_components is not None: + assert model.n_components_ == n_components + else: + assert model.n_components_ == n_features + + +@pytest.mark.parametrize("SPCA", (SparsePCA, MiniBatchSparsePCA)) +@pytest.mark.parametrize("method", ("lars", "cd")) +@pytest.mark.parametrize( + "data_type, expected_type", + ( + (np.float32, np.float32), + (np.float64, np.float64), + (np.int32, np.float64), + (np.int64, np.float64), + ), +) +def test_sparse_pca_dtype_match(SPCA, method, data_type, expected_type): + # Verify output matrix dtype + n_samples, n_features, n_components = 12, 10, 3 + rng = np.random.RandomState(0) + input_array = rng.randn(n_samples, n_features).astype(data_type) + model = SPCA(n_components=n_components, method=method) + transformed = model.fit_transform(input_array) + + assert transformed.dtype == expected_type + assert model.components_.dtype == expected_type + + +@pytest.mark.parametrize("SPCA", (SparsePCA, MiniBatchSparsePCA)) +@pytest.mark.parametrize("method", ("lars", "cd")) +def test_sparse_pca_numerical_consistency(SPCA, method): + # Verify numericall consistentency among np.float32 and np.float64 + rtol = 1e-3 + alpha = 2 + n_samples, n_features, n_components = 12, 10, 3 + rng = np.random.RandomState(0) + input_array = rng.randn(n_samples, n_features) + + model_32 = SPCA( + n_components=n_components, alpha=alpha, method=method, random_state=0 + ) + transformed_32 = model_32.fit_transform(input_array.astype(np.float32)) + + model_64 = SPCA( + 
n_components=n_components, alpha=alpha, method=method, random_state=0 + ) + transformed_64 = model_64.fit_transform(input_array.astype(np.float64)) + + assert_allclose(transformed_64, transformed_32, rtol=rtol) + assert_allclose(model_64.components_, model_32.components_, rtol=rtol) + + +@pytest.mark.parametrize("SPCA", [SparsePCA, MiniBatchSparsePCA]) +def test_spca_feature_names_out(SPCA): + """Check feature names out for *SparsePCA.""" + rng = np.random.RandomState(0) + n_samples, n_features = 12, 10 + X = rng.randn(n_samples, n_features) + + model = SPCA(n_components=4).fit(X) + names = model.get_feature_names_out() + + estimator_name = SPCA.__name__.lower() + assert_array_equal([f"{estimator_name}{i}" for i in range(4)], names) + + +# TODO(1.6): remove in 1.6 +def test_spca_max_iter_None_deprecation(): + """Check that we raise a warning for the deprecation of `max_iter=None`.""" + rng = np.random.RandomState(0) + n_samples, n_features = 12, 10 + X = rng.randn(n_samples, n_features) + + warn_msg = "`max_iter=None` is deprecated in version 1.4 and will be removed" + with pytest.warns(FutureWarning, match=warn_msg): + MiniBatchSparsePCA(max_iter=None).fit(X) + + +def test_spca_early_stopping(global_random_seed): + """Check that `tol` and `max_no_improvement` act as early stopping.""" + rng = np.random.RandomState(global_random_seed) + n_samples, n_features = 50, 10 + X = rng.randn(n_samples, n_features) + + # vary the tolerance to force the early stopping of one of the model + model_early_stopped = MiniBatchSparsePCA( + max_iter=100, tol=0.5, random_state=global_random_seed + ).fit(X) + model_not_early_stopped = MiniBatchSparsePCA( + max_iter=100, tol=1e-3, random_state=global_random_seed + ).fit(X) + assert model_early_stopped.n_iter_ < model_not_early_stopped.n_iter_ + + # force the max number of no improvement to a large value to check that + # it does help to early stop + model_early_stopped = MiniBatchSparsePCA( + max_iter=100, tol=1e-6, 
max_no_improvement=2, random_state=global_random_seed + ).fit(X) + model_not_early_stopped = MiniBatchSparsePCA( + max_iter=100, tol=1e-6, max_no_improvement=100, random_state=global_random_seed + ).fit(X) + assert model_early_stopped.n_iter_ < model_not_early_stopped.n_iter_ + + +def test_equivalence_components_pca_spca(global_random_seed): + """Check the equivalence of the components found by PCA and SparsePCA. + + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/23932 + """ + rng = np.random.RandomState(global_random_seed) + X = rng.randn(50, 4) + + n_components = 2 + pca = PCA( + n_components=n_components, + svd_solver="randomized", + random_state=0, + ).fit(X) + spca = SparsePCA( + n_components=n_components, + method="lars", + ridge_alpha=0, + alpha=0, + random_state=0, + ).fit(X) + + assert_allclose(pca.components_, spca.components_) + + +def test_sparse_pca_inverse_transform(): + """Check that `inverse_transform` in `SparsePCA` and `PCA` are similar.""" + rng = np.random.RandomState(0) + n_samples, n_features = 10, 5 + X = rng.randn(n_samples, n_features) + + n_components = 2 + spca = SparsePCA( + n_components=n_components, alpha=1e-12, ridge_alpha=1e-12, random_state=0 + ) + pca = PCA(n_components=n_components, random_state=0) + X_trans_spca = spca.fit_transform(X) + X_trans_pca = pca.fit_transform(X) + assert_allclose( + spca.inverse_transform(X_trans_spca), pca.inverse_transform(X_trans_pca) + ) + + +@pytest.mark.parametrize("SPCA", [SparsePCA, MiniBatchSparsePCA]) +def test_transform_inverse_transform_round_trip(SPCA): + """Check the `transform` and `inverse_transform` round trip with no loss of + information. 
+ """ + rng = np.random.RandomState(0) + n_samples, n_features = 10, 5 + X = rng.randn(n_samples, n_features) + + n_components = n_features + spca = SPCA( + n_components=n_components, alpha=1e-12, ridge_alpha=1e-12, random_state=0 + ) + X_trans_spca = spca.fit_transform(X) + assert_allclose(spca.inverse_transform(X_trans_spca), X) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/tests/test_truncated_svd.py b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/tests/test_truncated_svd.py new file mode 100644 index 0000000000000000000000000000000000000000..4edb7d4a111094ce7c3ddafb8ef7a3024d76a964 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/tests/test_truncated_svd.py @@ -0,0 +1,212 @@ +"""Test truncated SVD transformer.""" + +import numpy as np +import pytest +import scipy.sparse as sp + +from sklearn.decomposition import PCA, TruncatedSVD +from sklearn.utils import check_random_state +from sklearn.utils._testing import assert_allclose, assert_array_less + +SVD_SOLVERS = ["arpack", "randomized"] + + +@pytest.fixture(scope="module") +def X_sparse(): + # Make an X that looks somewhat like a small tf-idf matrix. + rng = check_random_state(42) + X = sp.random(60, 55, density=0.2, format="csr", random_state=rng) + X.data[:] = 1 + np.log(X.data) + return X + + +@pytest.mark.parametrize("solver", ["randomized"]) +@pytest.mark.parametrize("kind", ("dense", "sparse")) +def test_solvers(X_sparse, solver, kind): + X = X_sparse if kind == "sparse" else X_sparse.toarray() + svd_a = TruncatedSVD(30, algorithm="arpack") + svd = TruncatedSVD(30, algorithm=solver, random_state=42, n_oversamples=100) + + Xa = svd_a.fit_transform(X)[:, :6] + Xr = svd.fit_transform(X)[:, :6] + assert_allclose(Xa, Xr, rtol=2e-3) + + comp_a = np.abs(svd_a.components_) + comp = np.abs(svd.components_) + # All elements are equal, but some elements are more equal than others. 
+ assert_allclose(comp_a[:9], comp[:9], rtol=1e-3) + assert_allclose(comp_a[9:], comp[9:], atol=1e-2) + + +@pytest.mark.parametrize("n_components", (10, 25, 41, 55)) +def test_attributes(n_components, X_sparse): + n_features = X_sparse.shape[1] + tsvd = TruncatedSVD(n_components).fit(X_sparse) + assert tsvd.n_components == n_components + assert tsvd.components_.shape == (n_components, n_features) + + +@pytest.mark.parametrize( + "algorithm, n_components", + [ + ("arpack", 55), + ("arpack", 56), + ("randomized", 56), + ], +) +def test_too_many_components(X_sparse, algorithm, n_components): + tsvd = TruncatedSVD(n_components=n_components, algorithm=algorithm) + with pytest.raises(ValueError): + tsvd.fit(X_sparse) + + +@pytest.mark.parametrize("fmt", ("array", "csr", "csc", "coo", "lil")) +def test_sparse_formats(fmt, X_sparse): + n_samples = X_sparse.shape[0] + Xfmt = X_sparse.toarray() if fmt == "dense" else getattr(X_sparse, "to" + fmt)() + tsvd = TruncatedSVD(n_components=11) + Xtrans = tsvd.fit_transform(Xfmt) + assert Xtrans.shape == (n_samples, 11) + Xtrans = tsvd.transform(Xfmt) + assert Xtrans.shape == (n_samples, 11) + + +@pytest.mark.parametrize("algo", SVD_SOLVERS) +def test_inverse_transform(algo, X_sparse): + # We need a lot of components for the reconstruction to be "almost + # equal" in all positions. XXX Test means or sums instead? 
+ tsvd = TruncatedSVD(n_components=52, random_state=42, algorithm=algo) + Xt = tsvd.fit_transform(X_sparse) + Xinv = tsvd.inverse_transform(Xt) + assert_allclose(Xinv, X_sparse.toarray(), rtol=1e-1, atol=2e-1) + + +def test_integers(X_sparse): + n_samples = X_sparse.shape[0] + Xint = X_sparse.astype(np.int64) + tsvd = TruncatedSVD(n_components=6) + Xtrans = tsvd.fit_transform(Xint) + assert Xtrans.shape == (n_samples, tsvd.n_components) + + +@pytest.mark.parametrize("kind", ("dense", "sparse")) +@pytest.mark.parametrize("n_components", [10, 20]) +@pytest.mark.parametrize("solver", SVD_SOLVERS) +def test_explained_variance(X_sparse, kind, n_components, solver): + X = X_sparse if kind == "sparse" else X_sparse.toarray() + svd = TruncatedSVD(n_components, algorithm=solver) + X_tr = svd.fit_transform(X) + # Assert that all the values are greater than 0 + assert_array_less(0.0, svd.explained_variance_ratio_) + + # Assert that total explained variance is less than 1 + assert_array_less(svd.explained_variance_ratio_.sum(), 1.0) + + # Test that explained_variance is correct + total_variance = np.var(X_sparse.toarray(), axis=0).sum() + variances = np.var(X_tr, axis=0) + true_explained_variance_ratio = variances / total_variance + + assert_allclose( + svd.explained_variance_ratio_, + true_explained_variance_ratio, + ) + + +@pytest.mark.parametrize("kind", ("dense", "sparse")) +@pytest.mark.parametrize("solver", SVD_SOLVERS) +def test_explained_variance_components_10_20(X_sparse, kind, solver): + X = X_sparse if kind == "sparse" else X_sparse.toarray() + svd_10 = TruncatedSVD(10, algorithm=solver, n_iter=10).fit(X) + svd_20 = TruncatedSVD(20, algorithm=solver, n_iter=10).fit(X) + + # Assert the 1st component is equal + assert_allclose( + svd_10.explained_variance_ratio_, + svd_20.explained_variance_ratio_[:10], + rtol=5e-3, + ) + + # Assert that 20 components has higher explained variance than 10 + assert ( + svd_20.explained_variance_ratio_.sum() > 
svd_10.explained_variance_ratio_.sum() + ) + + +@pytest.mark.parametrize("solver", SVD_SOLVERS) +def test_singular_values_consistency(solver): + # Check that the TruncatedSVD output has the correct singular values + rng = np.random.RandomState(0) + n_samples, n_features = 100, 80 + X = rng.randn(n_samples, n_features) + + pca = TruncatedSVD(n_components=2, algorithm=solver, random_state=rng).fit(X) + + # Compare to the Frobenius norm + X_pca = pca.transform(X) + assert_allclose( + np.sum(pca.singular_values_**2.0), + np.linalg.norm(X_pca, "fro") ** 2.0, + rtol=1e-2, + ) + + # Compare to the 2-norms of the score vectors + assert_allclose( + pca.singular_values_, np.sqrt(np.sum(X_pca**2.0, axis=0)), rtol=1e-2 + ) + + +@pytest.mark.parametrize("solver", SVD_SOLVERS) +def test_singular_values_expected(solver): + # Set the singular values and see what we get back + rng = np.random.RandomState(0) + n_samples = 100 + n_features = 110 + + X = rng.randn(n_samples, n_features) + + pca = TruncatedSVD(n_components=3, algorithm=solver, random_state=rng) + X_pca = pca.fit_transform(X) + + X_pca /= np.sqrt(np.sum(X_pca**2.0, axis=0)) + X_pca[:, 0] *= 3.142 + X_pca[:, 1] *= 2.718 + + X_hat_pca = np.dot(X_pca, pca.components_) + pca.fit(X_hat_pca) + assert_allclose(pca.singular_values_, [3.142, 2.718, 1.0], rtol=1e-14) + + +def test_truncated_svd_eq_pca(X_sparse): + # TruncatedSVD should be equal to PCA on centered data + + X_dense = X_sparse.toarray() + + X_c = X_dense - X_dense.mean(axis=0) + + params = dict(n_components=10, random_state=42) + + svd = TruncatedSVD(algorithm="arpack", **params) + pca = PCA(svd_solver="arpack", **params) + + Xt_svd = svd.fit_transform(X_c) + Xt_pca = pca.fit_transform(X_c) + + assert_allclose(Xt_svd, Xt_pca, rtol=1e-9) + assert_allclose(pca.mean_, 0, atol=1e-9) + assert_allclose(svd.components_, pca.components_) + + +@pytest.mark.parametrize( + "algorithm, tol", [("randomized", 0.0), ("arpack", 1e-6), ("arpack", 0.0)] +) 
+@pytest.mark.parametrize("kind", ("dense", "sparse")) +def test_fit_transform(X_sparse, algorithm, tol, kind): + # fit_transform(X) should equal fit(X).transform(X) + X = X_sparse if kind == "sparse" else X_sparse.toarray() + svd = TruncatedSVD( + n_components=5, n_iter=7, random_state=42, algorithm=algorithm, tol=tol + ) + X_transformed_1 = svd.fit_transform(X) + X_transformed_2 = svd.fit(X).transform(X) + assert_allclose(X_transformed_1, X_transformed_2) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/inspection/__init__.py b/env-llmeval/lib/python3.10/site-packages/sklearn/inspection/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..f8e08785e8358bd039e8179368db28483be2cd55 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/inspection/__init__.py @@ -0,0 +1,14 @@ +"""The :mod:`sklearn.inspection` module includes tools for model inspection.""" + + +from ._partial_dependence import partial_dependence +from ._permutation_importance import permutation_importance +from ._plot.decision_boundary import DecisionBoundaryDisplay +from ._plot.partial_dependence import PartialDependenceDisplay + +__all__ = [ + "partial_dependence", + "permutation_importance", + "PartialDependenceDisplay", + "DecisionBoundaryDisplay", +] diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/inspection/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/inspection/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e3bc76d7e51899cbed5027c355c29d0de7ead523 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/inspection/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/inspection/__pycache__/_partial_dependence.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/inspection/__pycache__/_partial_dependence.cpython-310.pyc new file mode 100644 
index 0000000000000000000000000000000000000000..f110b2c673a29e2756604413a284cc89e2a34659 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/inspection/__pycache__/_partial_dependence.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/inspection/__pycache__/_permutation_importance.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/inspection/__pycache__/_permutation_importance.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..edf95b0bd7a51ce7c02fa987272ca607ce6da943 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/inspection/__pycache__/_permutation_importance.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/inspection/_partial_dependence.py b/env-llmeval/lib/python3.10/site-packages/sklearn/inspection/_partial_dependence.py new file mode 100644 index 0000000000000000000000000000000000000000..4ad6094e02478a3dd579537b79355679a1a335f4 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/inspection/_partial_dependence.py @@ -0,0 +1,743 @@ +"""Partial dependence plots for regression and classification models.""" + +# Authors: Peter Prettenhofer +# Trevor Stephens +# Nicolas Hug +# License: BSD 3 clause + +from collections.abc import Iterable + +import numpy as np +from scipy import sparse +from scipy.stats.mstats import mquantiles + +from ..base import is_classifier, is_regressor +from ..ensemble import RandomForestRegressor +from ..ensemble._gb import BaseGradientBoosting +from ..ensemble._hist_gradient_boosting.gradient_boosting import ( + BaseHistGradientBoosting, +) +from ..exceptions import NotFittedError +from ..tree import DecisionTreeRegressor +from ..utils import ( + Bunch, + _determine_key_type, + _get_column_indices, + _safe_assign, + _safe_indexing, + check_array, + check_matplotlib_support, # noqa +) +from ..utils._param_validation import ( + HasMethods, + Integral, + 
Interval, + StrOptions, + validate_params, +) +from ..utils.extmath import cartesian +from ..utils.validation import _check_sample_weight, check_is_fitted +from ._pd_utils import _check_feature_names, _get_feature_index + +__all__ = [ + "partial_dependence", +] + + +def _grid_from_X(X, percentiles, is_categorical, grid_resolution): + """Generate a grid of points based on the percentiles of X. + + The grid is a cartesian product between the columns of ``values``. The + ith column of ``values`` consists in ``grid_resolution`` equally-spaced + points between the percentiles of the jth column of X. + + If ``grid_resolution`` is bigger than the number of unique values in the + j-th column of X or if the feature is a categorical feature (by inspecting + `is_categorical`) , then those unique values will be used instead. + + Parameters + ---------- + X : array-like of shape (n_samples, n_target_features) + The data. + + percentiles : tuple of float + The percentiles which are used to construct the extreme values of + the grid. Must be in [0, 1]. + + is_categorical : list of bool + For each feature, tells whether it is categorical or not. If a feature + is categorical, then the values used will be the unique ones + (i.e. categories) instead of the percentiles. + + grid_resolution : int + The number of equally spaced points to be placed on the grid for each + feature. + + Returns + ------- + grid : ndarray of shape (n_points, n_target_features) + A value for each feature at each point in the grid. ``n_points`` is + always ``<= grid_resolution ** X.shape[1]``. + + values : list of 1d ndarrays + The values with which the grid has been created. The size of each + array ``values[j]`` is either ``grid_resolution``, or the number of + unique values in ``X[:, j]``, whichever is smaller. 
+ """ + if not isinstance(percentiles, Iterable) or len(percentiles) != 2: + raise ValueError("'percentiles' must be a sequence of 2 elements.") + if not all(0 <= x <= 1 for x in percentiles): + raise ValueError("'percentiles' values must be in [0, 1].") + if percentiles[0] >= percentiles[1]: + raise ValueError("percentiles[0] must be strictly less than percentiles[1].") + + if grid_resolution <= 1: + raise ValueError("'grid_resolution' must be strictly greater than 1.") + + values = [] + # TODO: we should handle missing values (i.e. `np.nan`) specifically and store them + # in a different Bunch attribute. + for feature, is_cat in enumerate(is_categorical): + try: + uniques = np.unique(_safe_indexing(X, feature, axis=1)) + except TypeError as exc: + # `np.unique` will fail in the presence of `np.nan` and `str` categories + # due to sorting. Temporary, we reraise an error explaining the problem. + raise ValueError( + f"The column #{feature} contains mixed data types. Finding unique " + "categories fail due to sorting. It usually means that the column " + "contains `np.nan` values together with `str` categories. Such use " + "case is not yet supported in scikit-learn." + ) from exc + if is_cat or uniques.shape[0] < grid_resolution: + # Use the unique values either because: + # - feature has low resolution use unique values + # - feature is categorical + axis = uniques + else: + # create axis based on percentiles and grid resolution + emp_percentiles = mquantiles( + _safe_indexing(X, feature, axis=1), prob=percentiles, axis=0 + ) + if np.allclose(emp_percentiles[0], emp_percentiles[1]): + raise ValueError( + "percentiles are too close to each other, " + "unable to build the grid. Please choose percentiles " + "that are further apart." 
+ ) + axis = np.linspace( + emp_percentiles[0], + emp_percentiles[1], + num=grid_resolution, + endpoint=True, + ) + values.append(axis) + + return cartesian(values), values + + +def _partial_dependence_recursion(est, grid, features): + """Calculate partial dependence via the recursion method. + + The recursion method is in particular enabled for tree-based estimators. + + For each `grid` value, a weighted tree traversal is performed: if a split node + involves an input feature of interest, the corresponding left or right branch + is followed; otherwise both branches are followed, each branch being weighted + by the fraction of training samples that entered that branch. Finally, the + partial dependence is given by a weighted average of all the visited leaves + values. + + This method is more efficient in terms of speed than the `'brute'` method + (:func:`~sklearn.inspection._partial_dependence._partial_dependence_brute`). + However, here, the partial dependence computation is done explicitly with the + `X` used during training of `est`. + + Parameters + ---------- + est : BaseEstimator + A fitted estimator object implementing :term:`predict` or + :term:`decision_function`. Multioutput-multiclass classifiers are not + supported. Note that `'recursion'` is only supported for some tree-based + estimators (namely + :class:`~sklearn.ensemble.GradientBoostingClassifier`, + :class:`~sklearn.ensemble.GradientBoostingRegressor`, + :class:`~sklearn.ensemble.HistGradientBoostingClassifier`, + :class:`~sklearn.ensemble.HistGradientBoostingRegressor`, + :class:`~sklearn.tree.DecisionTreeRegressor`, + :class:`~sklearn.ensemble.RandomForestRegressor`, + ). + + grid : array-like of shape (n_points, n_target_features) + The grid of feature values for which the partial dependence is calculated. + Note that `n_points` is the number of points in the grid and `n_target_features` + is the number of features you are doing partial dependence at. 
+ + features : array-like of {int, str} + The feature (e.g. `[0]`) or pair of interacting features + (e.g. `[(0, 1)]`) for which the partial dependency should be computed. + + Returns + ------- + averaged_predictions : array-like of shape (n_targets, n_points) + The averaged predictions for the given `grid` of features values. + Note that `n_targets` is the number of targets (e.g. 1 for binary + classification, `n_tasks` for multi-output regression, and `n_classes` for + multiclass classification) and `n_points` is the number of points in the `grid`. + """ + averaged_predictions = est._compute_partial_dependence_recursion(grid, features) + if averaged_predictions.ndim == 1: + # reshape to (1, n_points) for consistency with + # _partial_dependence_brute + averaged_predictions = averaged_predictions.reshape(1, -1) + + return averaged_predictions + + +def _partial_dependence_brute( + est, grid, features, X, response_method, sample_weight=None +): + """Calculate partial dependence via the brute force method. + + The brute method explicitly averages the predictions of an estimator over a + grid of feature values. + + For each `grid` value, all the samples from `X` have their variables of + interest replaced by that specific `grid` value. The predictions are then made + and averaged across the samples. + + This method is slower than the `'recursion'` + (:func:`~sklearn.inspection._partial_dependence._partial_dependence_recursion`) + version for estimators with this second option. However, with the `'brute'` + force method, the average will be done with the given `X` and not the `X` + used during training, as it is done in the `'recursion'` version. Therefore + the average can always accept `sample_weight` (even when the estimator was + fitted without). + + Parameters + ---------- + est : BaseEstimator + A fitted estimator object implementing :term:`predict`, + :term:`predict_proba`, or :term:`decision_function`. + Multioutput-multiclass classifiers are not supported. 
+ + grid : array-like of shape (n_points, n_target_features) + The grid of feature values for which the partial dependence is calculated. + Note that `n_points` is the number of points in the grid and `n_target_features` + is the number of features you are doing partial dependence at. + + features : array-like of {int, str} + The feature (e.g. `[0]`) or pair of interacting features + (e.g. `[(0, 1)]`) for which the partial dependency should be computed. + + X : array-like of shape (n_samples, n_features) + `X` is used to generate values for the complement features. That is, for + each value in `grid`, the method will average the prediction of each + sample from `X` having that grid value for `features`. + + response_method : {'auto', 'predict_proba', 'decision_function'}, \ + default='auto' + Specifies whether to use :term:`predict_proba` or + :term:`decision_function` as the target response. For regressors + this parameter is ignored and the response is always the output of + :term:`predict`. By default, :term:`predict_proba` is tried first + and we revert to :term:`decision_function` if it doesn't exist. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights are used to calculate weighted means when averaging the + model output. If `None`, then samples are equally weighted. Note that + `sample_weight` does not change the individual predictions. + + Returns + ------- + averaged_predictions : array-like of shape (n_targets, n_points) + The averaged predictions for the given `grid` of features values. + Note that `n_targets` is the number of targets (e.g. 1 for binary + classification, `n_tasks` for multi-output regression, and `n_classes` for + multiclass classification) and `n_points` is the number of points in the `grid`. + + predictions : array-like + The predictions for the given `grid` of features values over the samples + from `X`. 
For non-multioutput regression and binary classification the + shape is `(n_instances, n_points)` and for multi-output regression and + multiclass classification the shape is `(n_targets, n_instances, n_points)`, + where `n_targets` is the number of targets (`n_tasks` for multi-output + regression, and `n_classes` for multiclass classification), `n_instances` + is the number of instances in `X`, and `n_points` is the number of points + in the `grid`. + """ + predictions = [] + averaged_predictions = [] + + # define the prediction_method (predict, predict_proba, decision_function). + if is_regressor(est): + prediction_method = est.predict + else: + predict_proba = getattr(est, "predict_proba", None) + decision_function = getattr(est, "decision_function", None) + if response_method == "auto": + # try predict_proba, then decision_function if it doesn't exist + prediction_method = predict_proba or decision_function + else: + prediction_method = ( + predict_proba + if response_method == "predict_proba" + else decision_function + ) + if prediction_method is None: + if response_method == "auto": + raise ValueError( + "The estimator has no predict_proba and no " + "decision_function method." 
+ ) + elif response_method == "predict_proba": + raise ValueError("The estimator has no predict_proba method.") + else: + raise ValueError("The estimator has no decision_function method.") + + X_eval = X.copy() + for new_values in grid: + for i, variable in enumerate(features): + _safe_assign(X_eval, new_values[i], column_indexer=variable) + + try: + # Note: predictions is of shape + # (n_points,) for non-multioutput regressors + # (n_points, n_tasks) for multioutput regressors + # (n_points, 1) for the regressors in cross_decomposition (I think) + # (n_points, 2) for binary classification + # (n_points, n_classes) for multiclass classification + pred = prediction_method(X_eval) + + predictions.append(pred) + # average over samples + averaged_predictions.append(np.average(pred, axis=0, weights=sample_weight)) + except NotFittedError as e: + raise ValueError("'estimator' parameter must be a fitted estimator") from e + + n_samples = X.shape[0] + + # reshape to (n_targets, n_instances, n_points) where n_targets is: + # - 1 for non-multioutput regression and binary classification (shape is + # already correct in those cases) + # - n_tasks for multi-output regression + # - n_classes for multiclass classification. + predictions = np.array(predictions).T + if is_regressor(est) and predictions.ndim == 2: + # non-multioutput regression, shape is (n_instances, n_points,) + predictions = predictions.reshape(n_samples, -1) + elif is_classifier(est) and predictions.shape[0] == 2: + # Binary classification, shape is (2, n_instances, n_points). + # we output the effect of **positive** class + predictions = predictions[1] + predictions = predictions.reshape(n_samples, -1) + + # reshape averaged_predictions to (n_targets, n_points) where n_targets is: + # - 1 for non-multioutput regression and binary classification (shape is + # already correct in those cases) + # - n_tasks for multi-output regression + # - n_classes for multiclass classification. 
+ averaged_predictions = np.array(averaged_predictions).T + if is_regressor(est) and averaged_predictions.ndim == 1: + # non-multioutput regression, shape is (n_points,) + averaged_predictions = averaged_predictions.reshape(1, -1) + elif is_classifier(est) and averaged_predictions.shape[0] == 2: + # Binary classification, shape is (2, n_points). + # we output the effect of **positive** class + averaged_predictions = averaged_predictions[1] + averaged_predictions = averaged_predictions.reshape(1, -1) + + return averaged_predictions, predictions + + +@validate_params( + { + "estimator": [ + HasMethods(["fit", "predict"]), + HasMethods(["fit", "predict_proba"]), + HasMethods(["fit", "decision_function"]), + ], + "X": ["array-like", "sparse matrix"], + "features": ["array-like", Integral, str], + "sample_weight": ["array-like", None], + "categorical_features": ["array-like", None], + "feature_names": ["array-like", None], + "response_method": [StrOptions({"auto", "predict_proba", "decision_function"})], + "percentiles": [tuple], + "grid_resolution": [Interval(Integral, 1, None, closed="left")], + "method": [StrOptions({"auto", "recursion", "brute"})], + "kind": [StrOptions({"average", "individual", "both"})], + }, + prefer_skip_nested_validation=True, +) +def partial_dependence( + estimator, + X, + features, + *, + sample_weight=None, + categorical_features=None, + feature_names=None, + response_method="auto", + percentiles=(0.05, 0.95), + grid_resolution=100, + method="auto", + kind="average", +): + """Partial dependence of ``features``. + + Partial dependence of a feature (or a set of features) corresponds to + the average response of an estimator for each possible value of the + feature. + + Read more in the :ref:`User Guide `. + + .. 
warning:: + + For :class:`~sklearn.ensemble.GradientBoostingClassifier` and + :class:`~sklearn.ensemble.GradientBoostingRegressor`, the + `'recursion'` method (used by default) will not account for the `init` + predictor of the boosting process. In practice, this will produce + the same values as `'brute'` up to a constant offset in the target + response, provided that `init` is a constant estimator (which is the + default). However, if `init` is not a constant estimator, the + partial dependence values are incorrect for `'recursion'` because the + offset will be sample-dependent. It is preferable to use the `'brute'` + method. Note that this only applies to + :class:`~sklearn.ensemble.GradientBoostingClassifier` and + :class:`~sklearn.ensemble.GradientBoostingRegressor`, not to + :class:`~sklearn.ensemble.HistGradientBoostingClassifier` and + :class:`~sklearn.ensemble.HistGradientBoostingRegressor`. + + Parameters + ---------- + estimator : BaseEstimator + A fitted estimator object implementing :term:`predict`, + :term:`predict_proba`, or :term:`decision_function`. + Multioutput-multiclass classifiers are not supported. + + X : {array-like, sparse matrix or dataframe} of shape (n_samples, n_features) + ``X`` is used to generate a grid of values for the target + ``features`` (where the partial dependence will be evaluated), and + also to generate values for the complement features when the + `method` is 'brute'. + + features : array-like of {int, str, bool} or int or str + The feature (e.g. `[0]`) or pair of interacting features + (e.g. `[(0, 1)]`) for which the partial dependency should be computed. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights are used to calculate weighted means when averaging the + model output. If `None`, then samples are equally weighted. If + `sample_weight` is not `None`, then `method` will be set to `'brute'`. + Note that `sample_weight` is ignored for `kind='individual'`. + + .. 
versionadded:: 1.3 + + categorical_features : array-like of shape (n_features,) or shape \ + (n_categorical_features,), dtype={bool, int, str}, default=None + Indicates the categorical features. + + - `None`: no feature will be considered categorical; + - boolean array-like: boolean mask of shape `(n_features,)` + indicating which features are categorical. Thus, this array has + the same shape has `X.shape[1]`; + - integer or string array-like: integer indices or strings + indicating categorical features. + + .. versionadded:: 1.2 + + feature_names : array-like of shape (n_features,), dtype=str, default=None + Name of each feature; `feature_names[i]` holds the name of the feature + with index `i`. + By default, the name of the feature corresponds to their numerical + index for NumPy array and their column name for pandas dataframe. + + .. versionadded:: 1.2 + + response_method : {'auto', 'predict_proba', 'decision_function'}, \ + default='auto' + Specifies whether to use :term:`predict_proba` or + :term:`decision_function` as the target response. For regressors + this parameter is ignored and the response is always the output of + :term:`predict`. By default, :term:`predict_proba` is tried first + and we revert to :term:`decision_function` if it doesn't exist. If + ``method`` is 'recursion', the response is always the output of + :term:`decision_function`. + + percentiles : tuple of float, default=(0.05, 0.95) + The lower and upper percentile used to create the extreme values + for the grid. Must be in [0, 1]. + + grid_resolution : int, default=100 + The number of equally spaced points on the grid, for each target + feature. 
+ + method : {'auto', 'recursion', 'brute'}, default='auto' + The method used to calculate the averaged predictions: + + - `'recursion'` is only supported for some tree-based estimators + (namely + :class:`~sklearn.ensemble.GradientBoostingClassifier`, + :class:`~sklearn.ensemble.GradientBoostingRegressor`, + :class:`~sklearn.ensemble.HistGradientBoostingClassifier`, + :class:`~sklearn.ensemble.HistGradientBoostingRegressor`, + :class:`~sklearn.tree.DecisionTreeRegressor`, + :class:`~sklearn.ensemble.RandomForestRegressor`, + ) when `kind='average'`. + This is more efficient in terms of speed. + With this method, the target response of a + classifier is always the decision function, not the predicted + probabilities. Since the `'recursion'` method implicitly computes + the average of the Individual Conditional Expectation (ICE) by + design, it is not compatible with ICE and thus `kind` must be + `'average'`. + + - `'brute'` is supported for any estimator, but is more + computationally intensive. + + - `'auto'`: the `'recursion'` is used for estimators that support it, + and `'brute'` is used otherwise. If `sample_weight` is not `None`, + then `'brute'` is used regardless of the estimator. + + Please see :ref:`this note ` for + differences between the `'brute'` and `'recursion'` method. + + kind : {'average', 'individual', 'both'}, default='average' + Whether to return the partial dependence averaged across all the + samples in the dataset or one value per sample or both. + See Returns below. + + Note that the fast `method='recursion'` option is only available for + `kind='average'` and `sample_weights=None`. Computing individual + dependencies and doing weighted averages requires using the slower + `method='brute'`. + + .. versionadded:: 0.24 + + Returns + ------- + predictions : :class:`~sklearn.utils.Bunch` + Dictionary-like object, with the following attributes. + + individual : ndarray of shape (n_outputs, n_instances, \ + len(values[0]), len(values[1]), ...) 
+ The predictions for all the points in the grid for all + samples in X. This is also known as Individual + Conditional Expectation (ICE). + Only available when `kind='individual'` or `kind='both'`. + + average : ndarray of shape (n_outputs, len(values[0]), \ + len(values[1]), ...) + The predictions for all the points in the grid, averaged + over all samples in X (or over the training data if + `method` is 'recursion'). + Only available when `kind='average'` or `kind='both'`. + + values : seq of 1d ndarrays + The values with which the grid has been created. + + .. deprecated:: 1.3 + The key `values` has been deprecated in 1.3 and will be removed + in 1.5 in favor of `grid_values`. See `grid_values` for details + about the `values` attribute. + + grid_values : seq of 1d ndarrays + The values with which the grid has been created. The generated + grid is a cartesian product of the arrays in `grid_values` where + `len(grid_values) == len(features)`. The size of each array + `grid_values[j]` is either `grid_resolution`, or the number of + unique values in `X[:, j]`, whichever is smaller. + + .. versionadded:: 1.3 + + `n_outputs` corresponds to the number of classes in a multi-class + setting, or to the number of tasks for multi-output regression. + For classical regression and binary classification `n_outputs==1`. + `n_values_feature_j` corresponds to the size `grid_values[j]`. + + See Also + -------- + PartialDependenceDisplay.from_estimator : Plot Partial Dependence. + PartialDependenceDisplay : Partial Dependence visualization. + + Examples + -------- + >>> X = [[0, 0, 2], [1, 0, 0]] + >>> y = [0, 1] + >>> from sklearn.ensemble import GradientBoostingClassifier + >>> gb = GradientBoostingClassifier(random_state=0).fit(X, y) + >>> partial_dependence(gb, features=[0], X=X, percentiles=(0, 1), + ... 
grid_resolution=2) # doctest: +SKIP + (array([[-4.52..., 4.52...]]), [array([ 0., 1.])]) + """ + check_is_fitted(estimator) + + if not (is_classifier(estimator) or is_regressor(estimator)): + raise ValueError("'estimator' must be a fitted regressor or classifier.") + + if is_classifier(estimator) and isinstance(estimator.classes_[0], np.ndarray): + raise ValueError("Multiclass-multioutput estimators are not supported") + + # Use check_array only on lists and other non-array-likes / sparse. Do not + # convert DataFrame into a NumPy array. + if not (hasattr(X, "__array__") or sparse.issparse(X)): + X = check_array(X, force_all_finite="allow-nan", dtype=object) + + if is_regressor(estimator) and response_method != "auto": + raise ValueError( + "The response_method parameter is ignored for regressors and " + "must be 'auto'." + ) + + if kind != "average": + if method == "recursion": + raise ValueError( + "The 'recursion' method only applies when 'kind' is set to 'average'" + ) + method = "brute" + + if method == "recursion" and sample_weight is not None: + raise ValueError( + "The 'recursion' method can only be applied when sample_weight is None." 
+ ) + + if method == "auto": + if sample_weight is not None: + method = "brute" + elif isinstance(estimator, BaseGradientBoosting) and estimator.init is None: + method = "recursion" + elif isinstance( + estimator, + (BaseHistGradientBoosting, DecisionTreeRegressor, RandomForestRegressor), + ): + method = "recursion" + else: + method = "brute" + + if method == "recursion": + if not isinstance( + estimator, + ( + BaseGradientBoosting, + BaseHistGradientBoosting, + DecisionTreeRegressor, + RandomForestRegressor, + ), + ): + supported_classes_recursion = ( + "GradientBoostingClassifier", + "GradientBoostingRegressor", + "HistGradientBoostingClassifier", + "HistGradientBoostingRegressor", + "HistGradientBoostingRegressor", + "DecisionTreeRegressor", + "RandomForestRegressor", + ) + raise ValueError( + "Only the following estimators support the 'recursion' " + "method: {}. Try using method='brute'.".format( + ", ".join(supported_classes_recursion) + ) + ) + if response_method == "auto": + response_method = "decision_function" + + if response_method != "decision_function": + raise ValueError( + "With the 'recursion' method, the response_method must be " + "'decision_function'. Got {}.".format(response_method) + ) + + if sample_weight is not None: + sample_weight = _check_sample_weight(sample_weight, X) + + if _determine_key_type(features, accept_slice=False) == "int": + # _get_column_indices() supports negative indexing. Here, we limit + # the indexing to be positive. 
The upper bound will be checked + # by _get_column_indices() + if np.any(np.less(features, 0)): + raise ValueError("all features must be in [0, {}]".format(X.shape[1] - 1)) + + features_indices = np.asarray( + _get_column_indices(X, features), dtype=np.int32, order="C" + ).ravel() + + feature_names = _check_feature_names(X, feature_names) + + n_features = X.shape[1] + if categorical_features is None: + is_categorical = [False] * len(features_indices) + else: + categorical_features = np.asarray(categorical_features) + if categorical_features.dtype.kind == "b": + # categorical features provided as a list of boolean + if categorical_features.size != n_features: + raise ValueError( + "When `categorical_features` is a boolean array-like, " + "the array should be of shape (n_features,). Got " + f"{categorical_features.size} elements while `X` contains " + f"{n_features} features." + ) + is_categorical = [categorical_features[idx] for idx in features_indices] + elif categorical_features.dtype.kind in ("i", "O", "U"): + # categorical features provided as a list of indices or feature names + categorical_features_idx = [ + _get_feature_index(cat, feature_names=feature_names) + for cat in categorical_features + ] + is_categorical = [ + idx in categorical_features_idx for idx in features_indices + ] + else: + raise ValueError( + "Expected `categorical_features` to be an array-like of boolean," + f" integer, or string. Got {categorical_features.dtype} instead." + ) + + grid, values = _grid_from_X( + _safe_indexing(X, features_indices, axis=1), + percentiles, + is_categorical, + grid_resolution, + ) + + if method == "brute": + averaged_predictions, predictions = _partial_dependence_brute( + estimator, grid, features_indices, X, response_method, sample_weight + ) + + # reshape predictions to + # (n_outputs, n_instances, n_values_feature_0, n_values_feature_1, ...) 
+ predictions = predictions.reshape( + -1, X.shape[0], *[val.shape[0] for val in values] + ) + else: + averaged_predictions = _partial_dependence_recursion( + estimator, grid, features_indices + ) + + # reshape averaged_predictions to + # (n_outputs, n_values_feature_0, n_values_feature_1, ...) + averaged_predictions = averaged_predictions.reshape( + -1, *[val.shape[0] for val in values] + ) + pdp_results = Bunch() + + msg = ( + "Key: 'values', is deprecated in 1.3 and will be removed in 1.5. " + "Please use 'grid_values' instead." + ) + pdp_results._set_deprecated( + values, new_key="grid_values", deprecated_key="values", warning_message=msg + ) + + if kind == "average": + pdp_results["average"] = averaged_predictions + elif kind == "individual": + pdp_results["individual"] = predictions + else: # kind='both' + pdp_results["average"] = averaged_predictions + pdp_results["individual"] = predictions + + return pdp_results diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/inspection/_pd_utils.py b/env-llmeval/lib/python3.10/site-packages/sklearn/inspection/_pd_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..76f4d626fd53c3e669f29335e65e724e5e33e382 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/inspection/_pd_utils.py @@ -0,0 +1,64 @@ +def _check_feature_names(X, feature_names=None): + """Check feature names. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Input data. + + feature_names : None or array-like of shape (n_names,), dtype=str + Feature names to check or `None`. + + Returns + ------- + feature_names : list of str + Feature names validated. If `feature_names` is `None`, then a list of + feature names is provided, i.e. the column names of a pandas dataframe + or a generic list of feature names (e.g. `["x0", "x1", ...]`) for a + NumPy array. 
+ """ + if feature_names is None: + if hasattr(X, "columns") and hasattr(X.columns, "tolist"): + # get the column names for a pandas dataframe + feature_names = X.columns.tolist() + else: + # define a list of numbered indices for a numpy array + feature_names = [f"x{i}" for i in range(X.shape[1])] + elif hasattr(feature_names, "tolist"): + # convert numpy array or pandas index to a list + feature_names = feature_names.tolist() + if len(set(feature_names)) != len(feature_names): + raise ValueError("feature_names should not contain duplicates.") + + return feature_names + + +def _get_feature_index(fx, feature_names=None): + """Get feature index. + + Parameters + ---------- + fx : int or str + Feature index or name. + + feature_names : list of str, default=None + All feature names from which to search the indices. + + Returns + ------- + idx : int + Feature index. + """ + if isinstance(fx, str): + if feature_names is None: + raise ValueError( + f"Cannot plot partial dependence for feature {fx!r} since " + "the list of feature names was not provided, neither as " + "column names of a pandas data-frame nor via the feature_names " + "parameter." 
+ ) + try: + return feature_names.index(fx) + except ValueError as e: + raise ValueError(f"Feature {fx!r} not in feature_names") from e + return fx diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/inspection/_permutation_importance.py b/env-llmeval/lib/python3.10/site-packages/sklearn/inspection/_permutation_importance.py new file mode 100644 index 0000000000000000000000000000000000000000..3d96acff9b91a52916b0a29ad45f8d86fad8a9e3 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/inspection/_permutation_importance.py @@ -0,0 +1,317 @@ +"""Permutation importance for estimators.""" + +import numbers + +import numpy as np + +from ..ensemble._bagging import _generate_indices +from ..metrics import check_scoring, get_scorer_names +from ..metrics._scorer import _check_multimetric_scoring, _MultimetricScorer +from ..model_selection._validation import _aggregate_score_dicts +from ..utils import Bunch, _safe_indexing, check_array, check_random_state +from ..utils._param_validation import ( + HasMethods, + Integral, + Interval, + RealNotInt, + StrOptions, + validate_params, +) +from ..utils.parallel import Parallel, delayed + + +def _weights_scorer(scorer, estimator, X, y, sample_weight): + if sample_weight is not None: + return scorer(estimator, X, y, sample_weight=sample_weight) + return scorer(estimator, X, y) + + +def _calculate_permutation_scores( + estimator, + X, + y, + sample_weight, + col_idx, + random_state, + n_repeats, + scorer, + max_samples, +): + """Calculate score when `col_idx` is permuted.""" + random_state = check_random_state(random_state) + + # Work on a copy of X to ensure thread-safety in case of threading based + # parallelism. Furthermore, making a copy is also useful when the joblib + # backend is 'loky' (default) or the old 'multiprocessing': in those cases, + # if X is large it will be automatically be backed by a readonly memory map + # (memmap). 
X.copy() on the other hand is always guaranteed to return a + # writable data-structure whose columns can be shuffled inplace. + if max_samples < X.shape[0]: + row_indices = _generate_indices( + random_state=random_state, + bootstrap=False, + n_population=X.shape[0], + n_samples=max_samples, + ) + X_permuted = _safe_indexing(X, row_indices, axis=0) + y = _safe_indexing(y, row_indices, axis=0) + if sample_weight is not None: + sample_weight = _safe_indexing(sample_weight, row_indices, axis=0) + else: + X_permuted = X.copy() + + scores = [] + shuffling_idx = np.arange(X_permuted.shape[0]) + for _ in range(n_repeats): + random_state.shuffle(shuffling_idx) + if hasattr(X_permuted, "iloc"): + col = X_permuted.iloc[shuffling_idx, col_idx] + col.index = X_permuted.index + X_permuted[X_permuted.columns[col_idx]] = col + else: + X_permuted[:, col_idx] = X_permuted[shuffling_idx, col_idx] + scores.append(_weights_scorer(scorer, estimator, X_permuted, y, sample_weight)) + + if isinstance(scores[0], dict): + scores = _aggregate_score_dicts(scores) + else: + scores = np.array(scores) + + return scores + + +def _create_importances_bunch(baseline_score, permuted_score): + """Compute the importances as the decrease in score. + + Parameters + ---------- + baseline_score : ndarray of shape (n_features,) + The baseline score without permutation. + permuted_score : ndarray of shape (n_features, n_repeats) + The permuted scores for the `n` repetitions. + + Returns + ------- + importances : :class:`~sklearn.utils.Bunch` + Dictionary-like object, with the following attributes. + importances_mean : ndarray, shape (n_features, ) + Mean of feature importance over `n_repeats`. + importances_std : ndarray, shape (n_features, ) + Standard deviation over `n_repeats`. + importances : ndarray, shape (n_features, n_repeats) + Raw permutation importance scores. 
+ """ + importances = baseline_score - permuted_score + return Bunch( + importances_mean=np.mean(importances, axis=1), + importances_std=np.std(importances, axis=1), + importances=importances, + ) + + +@validate_params( + { + "estimator": [HasMethods(["fit"])], + "X": ["array-like"], + "y": ["array-like", None], + "scoring": [ + StrOptions(set(get_scorer_names())), + callable, + list, + tuple, + dict, + None, + ], + "n_repeats": [Interval(Integral, 1, None, closed="left")], + "n_jobs": [Integral, None], + "random_state": ["random_state"], + "sample_weight": ["array-like", None], + "max_samples": [ + Interval(Integral, 1, None, closed="left"), + Interval(RealNotInt, 0, 1, closed="right"), + ], + }, + prefer_skip_nested_validation=True, +) +def permutation_importance( + estimator, + X, + y, + *, + scoring=None, + n_repeats=5, + n_jobs=None, + random_state=None, + sample_weight=None, + max_samples=1.0, +): + """Permutation importance for feature evaluation [BRE]_. + + The :term:`estimator` is required to be a fitted estimator. `X` can be the + data set used to train the estimator or a hold-out set. The permutation + importance of a feature is calculated as follows. First, a baseline metric, + defined by :term:`scoring`, is evaluated on a (potentially different) + dataset defined by the `X`. Next, a feature column from the validation set + is permuted and the metric is evaluated again. The permutation importance + is defined to be the difference between the baseline metric and metric from + permutating the feature column. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + estimator : object + An estimator that has already been :term:`fitted` and is compatible + with :term:`scorer`. + + X : ndarray or DataFrame, shape (n_samples, n_features) + Data on which permutation importance will be computed. + + y : array-like or None, shape (n_samples, ) or (n_samples, n_classes) + Targets for supervised or `None` for unsupervised. 
+ + scoring : str, callable, list, tuple, or dict, default=None + Scorer to use. + If `scoring` represents a single score, one can use: + + - a single string (see :ref:`scoring_parameter`); + - a callable (see :ref:`scoring`) that returns a single value. + + If `scoring` represents multiple scores, one can use: + + - a list or tuple of unique strings; + - a callable returning a dictionary where the keys are the metric + names and the values are the metric scores; + - a dictionary with metric names as keys and callables a values. + + Passing multiple scores to `scoring` is more efficient than calling + `permutation_importance` for each of the scores as it reuses + predictions to avoid redundant computation. + + If None, the estimator's default scorer is used. + + n_repeats : int, default=5 + Number of times to permute a feature. + + n_jobs : int or None, default=None + Number of jobs to run in parallel. The computation is done by computing + permutation score for each columns and parallelized over the columns. + `None` means 1 unless in a :obj:`joblib.parallel_backend` context. + `-1` means using all processors. See :term:`Glossary ` + for more details. + + random_state : int, RandomState instance, default=None + Pseudo-random number generator to control the permutations of each + feature. + Pass an int to get reproducible results across function calls. + See :term:`Glossary `. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights used in scoring. + + .. versionadded:: 0.24 + + max_samples : int or float, default=1.0 + The number of samples to draw from X to compute feature importance + in each repeat (without replacement). + + - If int, then draw `max_samples` samples. + - If float, then draw `max_samples * X.shape[0]` samples. + - If `max_samples` is equal to `1.0` or `X.shape[0]`, all samples + will be used. 
+ + While using this option may provide less accurate importance estimates, + it keeps the method tractable when evaluating feature importance on + large datasets. In combination with `n_repeats`, this allows to control + the computational speed vs statistical accuracy trade-off of this method. + + .. versionadded:: 1.0 + + Returns + ------- + result : :class:`~sklearn.utils.Bunch` or dict of such instances + Dictionary-like object, with the following attributes. + + importances_mean : ndarray of shape (n_features, ) + Mean of feature importance over `n_repeats`. + importances_std : ndarray of shape (n_features, ) + Standard deviation over `n_repeats`. + importances : ndarray of shape (n_features, n_repeats) + Raw permutation importance scores. + + If there are multiple scoring metrics in the scoring parameter + `result` is a dict with scorer names as keys (e.g. 'roc_auc') and + `Bunch` objects like above as values. + + References + ---------- + .. [BRE] :doi:`L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, + 2001. <10.1023/A:1010933404324>` + + Examples + -------- + >>> from sklearn.linear_model import LogisticRegression + >>> from sklearn.inspection import permutation_importance + >>> X = [[1, 9, 9],[1, 9, 9],[1, 9, 9], + ... [0, 9, 9],[0, 9, 9],[0, 9, 9]] + >>> y = [1, 1, 1, 0, 0, 0] + >>> clf = LogisticRegression().fit(X, y) + >>> result = permutation_importance(clf, X, y, n_repeats=10, + ... random_state=0) + >>> result.importances_mean + array([0.4666..., 0. , 0. ]) + >>> result.importances_std + array([0.2211..., 0. , 0. ]) + """ + if not hasattr(X, "iloc"): + X = check_array(X, force_all_finite="allow-nan", dtype=None) + + # Precompute random seed from the random state to be used + # to get a fresh independent RandomState instance for each + # parallel call to _calculate_permutation_scores, irrespective of + # the fact that variables are shared or not depending on the active + # joblib backend (sequential, thread-based or process-based). 
+ random_state = check_random_state(random_state) + random_seed = random_state.randint(np.iinfo(np.int32).max + 1) + + if not isinstance(max_samples, numbers.Integral): + max_samples = int(max_samples * X.shape[0]) + elif max_samples > X.shape[0]: + raise ValueError("max_samples must be <= n_samples") + + if callable(scoring): + scorer = scoring + elif scoring is None or isinstance(scoring, str): + scorer = check_scoring(estimator, scoring=scoring) + else: + scorers_dict = _check_multimetric_scoring(estimator, scoring) + scorer = _MultimetricScorer(scorers=scorers_dict) + + baseline_score = _weights_scorer(scorer, estimator, X, y, sample_weight) + + scores = Parallel(n_jobs=n_jobs)( + delayed(_calculate_permutation_scores)( + estimator, + X, + y, + sample_weight, + col_idx, + random_seed, + n_repeats, + scorer, + max_samples, + ) + for col_idx in range(X.shape[1]) + ) + + if isinstance(baseline_score, dict): + return { + name: _create_importances_bunch( + baseline_score[name], + # unpack the permuted scores + np.array([scores[col_idx][name] for col_idx in range(X.shape[1])]), + ) + for name in baseline_score + } + else: + return _create_importances_bunch(baseline_score, np.array(scores)) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/inspection/_plot/__init__.py b/env-llmeval/lib/python3.10/site-packages/sklearn/inspection/_plot/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/inspection/_plot/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/inspection/_plot/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1c7f22de8c174b1e99ef9a91e3e3d0f9118648c6 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/inspection/_plot/__pycache__/__init__.cpython-310.pyc differ diff --git 
a/env-llmeval/lib/python3.10/site-packages/sklearn/inspection/_plot/__pycache__/decision_boundary.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/inspection/_plot/__pycache__/decision_boundary.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3fb90ffc2dde325f71998cca6ef204af9f1d1f1d Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/inspection/_plot/__pycache__/decision_boundary.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/inspection/_plot/__pycache__/partial_dependence.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/inspection/_plot/__pycache__/partial_dependence.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a88e4cf88814f97cb9f6c9d62971c06d2a8d9b45 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/inspection/_plot/__pycache__/partial_dependence.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/inspection/_plot/decision_boundary.py b/env-llmeval/lib/python3.10/site-packages/sklearn/inspection/_plot/decision_boundary.py new file mode 100644 index 0000000000000000000000000000000000000000..12162b25c53ed4a588dbf476774fc22ba41ee49a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/inspection/_plot/decision_boundary.py @@ -0,0 +1,406 @@ +import numpy as np + +from ...base import is_regressor +from ...preprocessing import LabelEncoder +from ...utils import _safe_indexing, check_matplotlib_support +from ...utils._response import _get_response_values +from ...utils.validation import ( + _is_arraylike_not_scalar, + _num_features, + check_is_fitted, +) + + +def _check_boundary_response_method(estimator, response_method, class_of_interest): + """Validate the response methods to be used with the fitted estimator. + + Parameters + ---------- + estimator : object + Fitted estimator to check. 
class DecisionBoundaryDisplay:
    """Decisions boundary visualization.

    It is recommended to use
    :func:`~sklearn.inspection.DecisionBoundaryDisplay.from_estimator`
    to create a :class:`DecisionBoundaryDisplay`. All parameters are stored as
    attributes.

    Read more in the :ref:`User Guide <visualizations>`.

    .. versionadded:: 1.1

    Parameters
    ----------
    xx0 : ndarray of shape (grid_resolution, grid_resolution)
        First output of :func:`meshgrid <numpy.meshgrid>`.

    xx1 : ndarray of shape (grid_resolution, grid_resolution)
        Second output of :func:`meshgrid <numpy.meshgrid>`.

    response : ndarray of shape (grid_resolution, grid_resolution)
        Values of the response function.

    xlabel : str, default=None
        Default label to place on x axis.

    ylabel : str, default=None
        Default label to place on y axis.

    Attributes
    ----------
    surface_ : matplotlib `QuadContourSet` or `QuadMesh`
        If `plot_method` is 'contour' or 'contourf', `surface_` is a
        :class:`QuadContourSet <matplotlib.contour.QuadContourSet>`. If
        `plot_method` is 'pcolormesh', `surface_` is a
        :class:`QuadMesh <matplotlib.collections.QuadMesh>`.

    ax_ : matplotlib Axes
        Axes with decision boundary.

    figure_ : matplotlib Figure
        Figure containing the decision boundary.

    See Also
    --------
    DecisionBoundaryDisplay.from_estimator : Plot decision boundary given an estimator.

    Examples
    --------
    >>> import matplotlib.pyplot as plt
    >>> import numpy as np
    >>> from sklearn.datasets import load_iris
    >>> from sklearn.inspection import DecisionBoundaryDisplay
    >>> from sklearn.tree import DecisionTreeClassifier
    >>> iris = load_iris()
    >>> feature_1, feature_2 = np.meshgrid(
    ...     np.linspace(iris.data[:, 0].min(), iris.data[:, 0].max()),
    ...     np.linspace(iris.data[:, 1].min(), iris.data[:, 1].max())
    ... )
    >>> grid = np.vstack([feature_1.ravel(), feature_2.ravel()]).T
    >>> tree = DecisionTreeClassifier().fit(iris.data[:, :2], iris.target)
    >>> y_pred = np.reshape(tree.predict(grid), feature_1.shape)
    >>> display = DecisionBoundaryDisplay(
    ...     xx0=feature_1, xx1=feature_2, response=y_pred
    ... )
    >>> display.plot()
    <...>
    >>> display.ax_.scatter(
    ...     iris.data[:, 0], iris.data[:, 1], c=iris.target, edgecolor="black"
    ... )
    <...>
    >>> plt.show()
    """

    def __init__(self, *, xx0, xx1, response, xlabel=None, ylabel=None):
        # Plain attribute storage only; all rendering is deferred to `plot`.
        self.xx0 = xx0
        self.xx1 = xx1
        self.response = response
        self.xlabel = xlabel
        self.ylabel = ylabel

    def plot(self, plot_method="contourf", ax=None, xlabel=None, ylabel=None, **kwargs):
        """Plot visualization.

        Parameters
        ----------
        plot_method : {'contourf', 'contour', 'pcolormesh'}, default='contourf'
            Plotting method to call when plotting the response. Please refer
            to the following matplotlib documentation for details:
            :func:`contourf <matplotlib.pyplot.contourf>`,
            :func:`contour <matplotlib.pyplot.contour>`,
            :func:`pcolormesh <matplotlib.pyplot.pcolormesh>`.

        ax : Matplotlib axes, default=None
            Axes object to plot on. If `None`, a new figure and axes is
            created.

        xlabel : str, default=None
            Overwrite the x-axis label.

        ylabel : str, default=None
            Overwrite the y-axis label.

        **kwargs : dict
            Additional keyword arguments to be passed to the `plot_method`.

        Returns
        -------
        display: :class:`~sklearn.inspection.DecisionBoundaryDisplay`
            Object that stores computed values.
        """
        check_matplotlib_support("DecisionBoundaryDisplay.plot")
        # Imported lazily so that matplotlib is only required when plotting.
        import matplotlib.pyplot as plt  # noqa

        if plot_method not in ("contourf", "contour", "pcolormesh"):
            raise ValueError(
                "plot_method must be 'contourf', 'contour', or 'pcolormesh'"
            )

        if ax is None:
            _, ax = plt.subplots()

        # Dispatch to the Axes method named by `plot_method` (all three share
        # the (xx0, xx1, response) calling convention).
        plot_func = getattr(ax, plot_method)
        self.surface_ = plot_func(self.xx0, self.xx1, self.response, **kwargs)

        # An explicit label always wins; otherwise only fill in the stored
        # default when the axes does not already carry a label.
        if xlabel is not None or not ax.get_xlabel():
            xlabel = self.xlabel if xlabel is None else xlabel
            ax.set_xlabel(xlabel)
        if ylabel is not None or not ax.get_ylabel():
            ylabel = self.ylabel if ylabel is None else ylabel
            ax.set_ylabel(ylabel)

        self.ax_ = ax
        self.figure_ = ax.figure
        return self

    @classmethod
    def from_estimator(
        cls,
        estimator,
        X,
        *,
        grid_resolution=100,
        eps=1.0,
        plot_method="contourf",
        response_method="auto",
        class_of_interest=None,
        xlabel=None,
        ylabel=None,
        ax=None,
        **kwargs,
    ):
        """Plot decision boundary given an estimator.

        Read more in the :ref:`User Guide <visualizations>`.

        Parameters
        ----------
        estimator : object
            Trained estimator used to plot the decision boundary.

        X : {array-like, sparse matrix, dataframe} of shape (n_samples, 2)
            Input data that should be only 2-dimensional.

        grid_resolution : int, default=100
            Number of grid points to use for plotting decision boundary.
            Higher values will make the plot look nicer but be slower to
            render.

        eps : float, default=1.0
            Extends the minimum and maximum values of X for evaluating the
            response function.

        plot_method : {'contourf', 'contour', 'pcolormesh'}, default='contourf'
            Plotting method to call when plotting the response. Please refer
            to the following matplotlib documentation for details:
            :func:`contourf <matplotlib.pyplot.contourf>`,
            :func:`contour <matplotlib.pyplot.contour>`,
            :func:`pcolormesh <matplotlib.pyplot.pcolormesh>`.

        response_method : {'auto', 'predict_proba', 'decision_function', \
                'predict'}, default='auto'
            Specifies whether to use :term:`predict_proba`,
            :term:`decision_function`, :term:`predict` as the target response.
            If set to 'auto', the response method is tried in the following order:
            :term:`decision_function`, :term:`predict_proba`, :term:`predict`.
            For multiclass problems, :term:`predict` is selected when
            `response_method="auto"`.

        class_of_interest : int, float, bool or str, default=None
            The class considered when plotting the decision. If None,
            `estimator.classes_[1]` is considered as the positive class
            for binary classifiers. For multiclass classifiers, passing
            an explicit value for `class_of_interest` is mandatory.

            .. versionadded:: 1.4

        xlabel : str, default=None
            The label used for the x-axis. If `None`, an attempt is made to
            extract a label from `X` if it is a dataframe, otherwise an empty
            string is used.

        ylabel : str, default=None
            The label used for the y-axis. If `None`, an attempt is made to
            extract a label from `X` if it is a dataframe, otherwise an empty
            string is used.

        ax : Matplotlib axes, default=None
            Axes object to plot on. If `None`, a new figure and axes is
            created.

        **kwargs : dict
            Additional keyword arguments to be passed to the
            `plot_method`.

        Returns
        -------
        display : :class:`~sklearn.inspection.DecisionBoundaryDisplay`
            Object that stores the result.

        See Also
        --------
        DecisionBoundaryDisplay : Decision boundary visualization.
        sklearn.metrics.ConfusionMatrixDisplay.from_estimator : Plot the
            confusion matrix given an estimator, the data, and the label.
        sklearn.metrics.ConfusionMatrixDisplay.from_predictions : Plot the
            confusion matrix given the true and predicted labels.

        Examples
        --------
        >>> import matplotlib.pyplot as plt
        >>> from sklearn.datasets import load_iris
        >>> from sklearn.linear_model import LogisticRegression
        >>> from sklearn.inspection import DecisionBoundaryDisplay
        >>> iris = load_iris()
        >>> X = iris.data[:, :2]
        >>> classifier = LogisticRegression().fit(X, iris.target)
        >>> disp = DecisionBoundaryDisplay.from_estimator(
        ...     classifier, X, response_method="predict",
        ...     xlabel=iris.feature_names[0], ylabel=iris.feature_names[1],
        ...     alpha=0.5,
        ... )
        >>> disp.ax_.scatter(X[:, 0], X[:, 1], c=iris.target, edgecolor="k")
        <...>
        >>> plt.show()
        """
        check_matplotlib_support(f"{cls.__name__}.from_estimator")
        check_is_fitted(estimator)

        # --- Parameter validation (fail fast, before any expensive work) ---
        if not grid_resolution > 1:
            raise ValueError(
                "grid_resolution must be greater than 1. Got"
                f" {grid_resolution} instead."
            )

        if not eps >= 0:
            raise ValueError(
                f"eps must be greater than or equal to 0. Got {eps} instead."
            )

        possible_plot_methods = ("contourf", "contour", "pcolormesh")
        if plot_method not in possible_plot_methods:
            available_methods = ", ".join(possible_plot_methods)
            raise ValueError(
                f"plot_method must be one of {available_methods}. "
                f"Got {plot_method} instead."
            )

        num_features = _num_features(X)
        if num_features != 2:
            raise ValueError(
                f"n_features must be equal to 2. Got {num_features} instead."
            )

        # --- Build the evaluation grid over the (padded) data range ---
        x0, x1 = _safe_indexing(X, 0, axis=1), _safe_indexing(X, 1, axis=1)

        x0_min, x0_max = x0.min() - eps, x0.max() + eps
        x1_min, x1_max = x1.min() - eps, x1.max() + eps

        xx0, xx1 = np.meshgrid(
            np.linspace(x0_min, x0_max, grid_resolution),
            np.linspace(x1_min, x1_max, grid_resolution),
        )
        if hasattr(X, "iloc"):
            # we need to preserve the feature names and therefore get an empty dataframe
            X_grid = X.iloc[[], :].copy()
            X_grid.iloc[:, 0] = xx0.ravel()
            X_grid.iloc[:, 1] = xx1.ravel()
        else:
            X_grid = np.c_[xx0.ravel(), xx1.ravel()]

        # --- Evaluate the estimator's response on the grid ---
        prediction_method = _check_boundary_response_method(
            estimator, response_method, class_of_interest
        )
        try:
            response, _, response_method_used = _get_response_values(
                estimator,
                X_grid,
                response_method=prediction_method,
                pos_label=class_of_interest,
                return_response_method_used=True,
            )
        except ValueError as exc:
            if "is not a valid label" in str(exc):
                # re-raise a more informative error message since `pos_label` is unknown
                # to our user when interacting with
                # `DecisionBoundaryDisplay.from_estimator`
                raise ValueError(
                    f"class_of_interest={class_of_interest} is not a valid label: It "
                    f"should be one of {estimator.classes_}"
                ) from exc
            raise

        # convert classes predictions into integers
        if response_method_used == "predict" and hasattr(estimator, "classes_"):
            encoder = LabelEncoder()
            encoder.classes_ = estimator.classes_
            response = encoder.transform(response)

        if response.ndim != 1:
            if is_regressor(estimator):
                raise ValueError("Multi-output regressors are not supported")

            # For the multiclass case, `_get_response_values` returns the response
            # as-is. Thus, we have a column per class and we need to select the column
            # corresponding to the positive class.
            col_idx = np.flatnonzero(estimator.classes_ == class_of_interest)[0]
            response = response[:, col_idx]

        # Default axis labels come from dataframe column names when available.
        if xlabel is None:
            xlabel = X.columns[0] if hasattr(X, "columns") else ""

        if ylabel is None:
            ylabel = X.columns[1] if hasattr(X, "columns") else ""

        display = cls(
            xx0=xx0,
            xx1=xx1,
            response=response.reshape(xx0.shape),
            xlabel=xlabel,
            ylabel=ylabel,
        )
        return display.plot(ax=ax, plot_method=plot_method, **kwargs)
versionadded:: 0.22 + + Parameters + ---------- + pd_results : list of Bunch + Results of :func:`~sklearn.inspection.partial_dependence` for + ``features``. + + features : list of (int,) or list of (int, int) + Indices of features for a given plot. A tuple of one integer will plot + a partial dependence curve of one feature. A tuple of two integers will + plot a two-way partial dependence curve as a contour plot. + + feature_names : list of str + Feature names corresponding to the indices in ``features``. + + target_idx : int + + - In a multiclass setting, specifies the class for which the PDPs + should be computed. Note that for binary classification, the + positive class (index 1) is always used. + - In a multioutput setting, specifies the task for which the PDPs + should be computed. + + Ignored in binary classification or classical regression settings. + + deciles : dict + Deciles for feature indices in ``features``. + + kind : {'average', 'individual', 'both'} or list of such str, \ + default='average' + Whether to plot the partial dependence averaged across all the samples + in the dataset or one line per sample or both. + + - ``kind='average'`` results in the traditional PD plot; + - ``kind='individual'`` results in the ICE plot; + - ``kind='both'`` results in plotting both the ICE and PD on the same + plot. + + A list of such strings can be provided to specify `kind` on a per-plot + basis. The length of the list should be the same as the number of + interaction requested in `features`. + + .. note:: + ICE ('individual' or 'both') is not a valid option for 2-ways + interactions plot. As a result, an error will be raised. + 2-ways interaction plots should always be configured to + use the 'average' kind instead. + + .. note:: + The fast ``method='recursion'`` option is only available for + `kind='average'` and `sample_weights=None`. Computing individual + dependencies and doing weighted averages requires using the slower + `method='brute'`. + + .. 
versionadded:: 0.24 + Add `kind` parameter with `'average'`, `'individual'`, and `'both'` + options. + + .. versionadded:: 1.1 + Add the possibility to pass a list of string specifying `kind` + for each plot. + + subsample : float, int or None, default=1000 + Sampling for ICE curves when `kind` is 'individual' or 'both'. + If float, should be between 0.0 and 1.0 and represent the proportion + of the dataset to be used to plot ICE curves. If int, represents the + maximum absolute number of samples to use. + + Note that the full dataset is still used to calculate partial + dependence when `kind='both'`. + + .. versionadded:: 0.24 + + random_state : int, RandomState instance or None, default=None + Controls the randomness of the selected samples when subsamples is not + `None`. See :term:`Glossary ` for details. + + .. versionadded:: 0.24 + + is_categorical : list of (bool,) or list of (bool, bool), default=None + Whether each target feature in `features` is categorical or not. + The list should be same size as `features`. If `None`, all features + are assumed to be continuous. + + .. versionadded:: 1.2 + + Attributes + ---------- + bounding_ax_ : matplotlib Axes or None + If `ax` is an axes or None, the `bounding_ax_` is the axes where the + grid of partial dependence plots are drawn. If `ax` is a list of axes + or a numpy array of axes, `bounding_ax_` is None. + + axes_ : ndarray of matplotlib Axes + If `ax` is an axes or None, `axes_[i, j]` is the axes on the i-th row + and j-th column. If `ax` is a list of axes, `axes_[i]` is the i-th item + in `ax`. Elements that are None correspond to a nonexisting axes in + that position. + + lines_ : ndarray of matplotlib Artists + If `ax` is an axes or None, `lines_[i, j]` is the partial dependence + curve on the i-th row and j-th column. If `ax` is a list of axes, + `lines_[i]` is the partial dependence curve corresponding to the i-th + item in `ax`. 
Elements that are None correspond to a nonexisting axes + or an axes that does not include a line plot. + + deciles_vlines_ : ndarray of matplotlib LineCollection + If `ax` is an axes or None, `vlines_[i, j]` is the line collection + representing the x axis deciles of the i-th row and j-th column. If + `ax` is a list of axes, `vlines_[i]` corresponds to the i-th item in + `ax`. Elements that are None correspond to a nonexisting axes or an + axes that does not include a PDP plot. + + .. versionadded:: 0.23 + + deciles_hlines_ : ndarray of matplotlib LineCollection + If `ax` is an axes or None, `vlines_[i, j]` is the line collection + representing the y axis deciles of the i-th row and j-th column. If + `ax` is a list of axes, `vlines_[i]` corresponds to the i-th item in + `ax`. Elements that are None correspond to a nonexisting axes or an + axes that does not include a 2-way plot. + + .. versionadded:: 0.23 + + contours_ : ndarray of matplotlib Artists + If `ax` is an axes or None, `contours_[i, j]` is the partial dependence + plot on the i-th row and j-th column. If `ax` is a list of axes, + `contours_[i]` is the partial dependence plot corresponding to the i-th + item in `ax`. Elements that are None correspond to a nonexisting axes + or an axes that does not include a contour plot. + + bars_ : ndarray of matplotlib Artists + If `ax` is an axes or None, `bars_[i, j]` is the partial dependence bar + plot on the i-th row and j-th column (for a categorical feature). + If `ax` is a list of axes, `bars_[i]` is the partial dependence bar + plot corresponding to the i-th item in `ax`. Elements that are None + correspond to a nonexisting axes or an axes that does not include a + bar plot. + + .. versionadded:: 1.2 + + heatmaps_ : ndarray of matplotlib Artists + If `ax` is an axes or None, `heatmaps_[i, j]` is the partial dependence + heatmap on the i-th row and j-th column (for a pair of categorical + features) . 
If `ax` is a list of axes, `heatmaps_[i]` is the partial + dependence heatmap corresponding to the i-th item in `ax`. Elements + that are None correspond to a nonexisting axes or an axes that does not + include a heatmap. + + .. versionadded:: 1.2 + + figure_ : matplotlib Figure + Figure containing partial dependence plots. + + See Also + -------- + partial_dependence : Compute Partial Dependence values. + PartialDependenceDisplay.from_estimator : Plot Partial Dependence. + + Examples + -------- + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> from sklearn.datasets import make_friedman1 + >>> from sklearn.ensemble import GradientBoostingRegressor + >>> from sklearn.inspection import PartialDependenceDisplay + >>> from sklearn.inspection import partial_dependence + >>> X, y = make_friedman1() + >>> clf = GradientBoostingRegressor(n_estimators=10).fit(X, y) + >>> features, feature_names = [(0,)], [f"Features #{i}" for i in range(X.shape[1])] + >>> deciles = {0: np.linspace(0, 1, num=5)} + >>> pd_results = partial_dependence( + ... clf, X, features=0, kind="average", grid_resolution=5) + >>> display = PartialDependenceDisplay( + ... [pd_results], features=features, feature_names=feature_names, + ... target_idx=0, deciles=deciles + ... 
) + >>> display.plot(pdp_lim={1: (-1.38, 0.66)}) + <...> + >>> plt.show() + """ + + def __init__( + self, + pd_results, + *, + features, + feature_names, + target_idx, + deciles, + kind="average", + subsample=1000, + random_state=None, + is_categorical=None, + ): + self.pd_results = pd_results + self.features = features + self.feature_names = feature_names + self.target_idx = target_idx + self.deciles = deciles + self.kind = kind + self.subsample = subsample + self.random_state = random_state + self.is_categorical = is_categorical + + @classmethod + def from_estimator( + cls, + estimator, + X, + features, + *, + sample_weight=None, + categorical_features=None, + feature_names=None, + target=None, + response_method="auto", + n_cols=3, + grid_resolution=100, + percentiles=(0.05, 0.95), + method="auto", + n_jobs=None, + verbose=0, + line_kw=None, + ice_lines_kw=None, + pd_line_kw=None, + contour_kw=None, + ax=None, + kind="average", + centered=False, + subsample=1000, + random_state=None, + ): + """Partial dependence (PD) and individual conditional expectation (ICE) plots. + + Partial dependence plots, individual conditional expectation plots or an + overlay of both of them can be plotted by setting the ``kind`` + parameter. The ``len(features)`` plots are arranged in a grid with + ``n_cols`` columns. Two-way partial dependence plots are plotted as + contour plots. The deciles of the feature values will be shown with tick + marks on the x-axes for one-way plots, and on both axes for two-way + plots. + + Read more in the :ref:`User Guide `. + + .. note:: + + :func:`PartialDependenceDisplay.from_estimator` does not support using the + same axes with multiple calls. 
To plot the partial dependence for + multiple estimators, please pass the axes created by the first call to the + second call:: + + >>> from sklearn.inspection import PartialDependenceDisplay + >>> from sklearn.datasets import make_friedman1 + >>> from sklearn.linear_model import LinearRegression + >>> from sklearn.ensemble import RandomForestRegressor + >>> X, y = make_friedman1() + >>> est1 = LinearRegression().fit(X, y) + >>> est2 = RandomForestRegressor().fit(X, y) + >>> disp1 = PartialDependenceDisplay.from_estimator(est1, X, + ... [1, 2]) + >>> disp2 = PartialDependenceDisplay.from_estimator(est2, X, [1, 2], + ... ax=disp1.axes_) + + .. warning:: + + For :class:`~sklearn.ensemble.GradientBoostingClassifier` and + :class:`~sklearn.ensemble.GradientBoostingRegressor`, the + `'recursion'` method (used by default) will not account for the `init` + predictor of the boosting process. In practice, this will produce + the same values as `'brute'` up to a constant offset in the target + response, provided that `init` is a constant estimator (which is the + default). However, if `init` is not a constant estimator, the + partial dependence values are incorrect for `'recursion'` because the + offset will be sample-dependent. It is preferable to use the `'brute'` + method. Note that this only applies to + :class:`~sklearn.ensemble.GradientBoostingClassifier` and + :class:`~sklearn.ensemble.GradientBoostingRegressor`, not to + :class:`~sklearn.ensemble.HistGradientBoostingClassifier` and + :class:`~sklearn.ensemble.HistGradientBoostingRegressor`. + + .. versionadded:: 1.0 + + Parameters + ---------- + estimator : BaseEstimator + A fitted estimator object implementing :term:`predict`, + :term:`predict_proba`, or :term:`decision_function`. + Multioutput-multiclass classifiers are not supported. 
+ + X : {array-like, dataframe} of shape (n_samples, n_features) + ``X`` is used to generate a grid of values for the target + ``features`` (where the partial dependence will be evaluated), and + also to generate values for the complement features when the + `method` is `'brute'`. + + features : list of {int, str, pair of int, pair of str} + The target features for which to create the PDPs. + If `features[i]` is an integer or a string, a one-way PDP is created; + if `features[i]` is a tuple, a two-way PDP is created (only supported + with `kind='average'`). Each tuple must be of size 2. + If any entry is a string, then it must be in ``feature_names``. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights are used to calculate weighted means when averaging the + model output. If `None`, then samples are equally weighted. If + `sample_weight` is not `None`, then `method` will be set to `'brute'`. + Note that `sample_weight` is ignored for `kind='individual'`. + + .. versionadded:: 1.3 + + categorical_features : array-like of shape (n_features,) or shape \ + (n_categorical_features,), dtype={bool, int, str}, default=None + Indicates the categorical features. + + - `None`: no feature will be considered categorical; + - boolean array-like: boolean mask of shape `(n_features,)` + indicating which features are categorical. Thus, this array has + the same shape has `X.shape[1]`; + - integer or string array-like: integer indices or strings + indicating categorical features. + + .. versionadded:: 1.2 + + feature_names : array-like of shape (n_features,), dtype=str, default=None + Name of each feature; `feature_names[i]` holds the name of the feature + with index `i`. + By default, the name of the feature corresponds to their numerical + index for NumPy array and their column name for pandas dataframe. + + target : int, default=None + - In a multiclass setting, specifies the class for which the PDPs + should be computed. 
Note that for binary classification, the + positive class (index 1) is always used. + - In a multioutput setting, specifies the task for which the PDPs + should be computed. + + Ignored in binary classification or classical regression settings. + + response_method : {'auto', 'predict_proba', 'decision_function'}, \ + default='auto' + Specifies whether to use :term:`predict_proba` or + :term:`decision_function` as the target response. For regressors + this parameter is ignored and the response is always the output of + :term:`predict`. By default, :term:`predict_proba` is tried first + and we revert to :term:`decision_function` if it doesn't exist. If + ``method`` is `'recursion'`, the response is always the output of + :term:`decision_function`. + + n_cols : int, default=3 + The maximum number of columns in the grid plot. Only active when `ax` + is a single axis or `None`. + + grid_resolution : int, default=100 + The number of equally spaced points on the axes of the plots, for each + target feature. + + percentiles : tuple of float, default=(0.05, 0.95) + The lower and upper percentile used to create the extreme values + for the PDP axes. Must be in [0, 1]. + + method : str, default='auto' + The method used to calculate the averaged predictions: + + - `'recursion'` is only supported for some tree-based estimators + (namely + :class:`~sklearn.ensemble.GradientBoostingClassifier`, + :class:`~sklearn.ensemble.GradientBoostingRegressor`, + :class:`~sklearn.ensemble.HistGradientBoostingClassifier`, + :class:`~sklearn.ensemble.HistGradientBoostingRegressor`, + :class:`~sklearn.tree.DecisionTreeRegressor`, + :class:`~sklearn.ensemble.RandomForestRegressor` + but is more efficient in terms of speed. + With this method, the target response of a + classifier is always the decision function, not the predicted + probabilities. 
Since the `'recursion'` method implicitly computes + the average of the ICEs by design, it is not compatible with ICE and + thus `kind` must be `'average'`. + + - `'brute'` is supported for any estimator, but is more + computationally intensive. + + - `'auto'`: the `'recursion'` is used for estimators that support it, + and `'brute'` is used otherwise. If `sample_weight` is not `None`, + then `'brute'` is used regardless of the estimator. + + Please see :ref:`this note ` for + differences between the `'brute'` and `'recursion'` method. + + n_jobs : int, default=None + The number of CPUs to use to compute the partial dependences. + Computation is parallelized over features specified by the `features` + parameter. + + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + verbose : int, default=0 + Verbose output during PD computations. + + line_kw : dict, default=None + Dict with keywords passed to the ``matplotlib.pyplot.plot`` call. + For one-way partial dependence plots. It can be used to define common + properties for both `ice_lines_kw` and `pdp_line_kw`. + + ice_lines_kw : dict, default=None + Dictionary with keywords passed to the `matplotlib.pyplot.plot` call. + For ICE lines in the one-way partial dependence plots. + The key value pairs defined in `ice_lines_kw` takes priority over + `line_kw`. + + pd_line_kw : dict, default=None + Dictionary with keywords passed to the `matplotlib.pyplot.plot` call. + For partial dependence in one-way partial dependence plots. + The key value pairs defined in `pd_line_kw` takes priority over + `line_kw`. + + contour_kw : dict, default=None + Dict with keywords passed to the ``matplotlib.pyplot.contourf`` call. + For two-way partial dependence plots. 
+ + ax : Matplotlib axes or array-like of Matplotlib axes, default=None + - If a single axis is passed in, it is treated as a bounding axes + and a grid of partial dependence plots will be drawn within + these bounds. The `n_cols` parameter controls the number of + columns in the grid. + - If an array-like of axes are passed in, the partial dependence + plots will be drawn directly into these axes. + - If `None`, a figure and a bounding axes is created and treated + as the single axes case. + + kind : {'average', 'individual', 'both'}, default='average' + Whether to plot the partial dependence averaged across all the samples + in the dataset or one line per sample or both. + + - ``kind='average'`` results in the traditional PD plot; + - ``kind='individual'`` results in the ICE plot. + + Note that the fast `method='recursion'` option is only available for + `kind='average'` and `sample_weights=None`. Computing individual + dependencies and doing weighted averages requires using the slower + `method='brute'`. + + centered : bool, default=False + If `True`, the ICE and PD lines will start at the origin of the + y-axis. By default, no centering is done. + + .. versionadded:: 1.1 + + subsample : float, int or None, default=1000 + Sampling for ICE curves when `kind` is 'individual' or 'both'. + If `float`, should be between 0.0 and 1.0 and represent the proportion + of the dataset to be used to plot ICE curves. If `int`, represents the + absolute number samples to use. + + Note that the full dataset is still used to calculate averaged partial + dependence when `kind='both'`. + + random_state : int, RandomState instance or None, default=None + Controls the randomness of the selected samples when subsamples is not + `None` and `kind` is either `'both'` or `'individual'`. + See :term:`Glossary ` for details. 
+ + Returns + ------- + display : :class:`~sklearn.inspection.PartialDependenceDisplay` + + See Also + -------- + partial_dependence : Compute Partial Dependence values. + + Examples + -------- + >>> import matplotlib.pyplot as plt + >>> from sklearn.datasets import make_friedman1 + >>> from sklearn.ensemble import GradientBoostingRegressor + >>> from sklearn.inspection import PartialDependenceDisplay + >>> X, y = make_friedman1() + >>> clf = GradientBoostingRegressor(n_estimators=10).fit(X, y) + >>> PartialDependenceDisplay.from_estimator(clf, X, [0, (0, 1)]) + <...> + >>> plt.show() + """ + check_matplotlib_support(f"{cls.__name__}.from_estimator") # noqa + import matplotlib.pyplot as plt # noqa + + # set target_idx for multi-class estimators + if hasattr(estimator, "classes_") and np.size(estimator.classes_) > 2: + if target is None: + raise ValueError("target must be specified for multi-class") + target_idx = np.searchsorted(estimator.classes_, target) + if ( + not (0 <= target_idx < len(estimator.classes_)) + or estimator.classes_[target_idx] != target + ): + raise ValueError("target not in est.classes_, got {}".format(target)) + else: + # regression and binary classification + target_idx = 0 + + # Use check_array only on lists and other non-array-likes / sparse. Do not + # convert DataFrame into a NumPy array. + if not (hasattr(X, "__array__") or sparse.issparse(X)): + X = check_array(X, force_all_finite="allow-nan", dtype=object) + n_features = X.shape[1] + + feature_names = _check_feature_names(X, feature_names) + # expand kind to always be a list of str + kind_ = [kind] * len(features) if isinstance(kind, str) else kind + if len(kind_) != len(features): + raise ValueError( + "When `kind` is provided as a list of strings, it should contain " + f"as many elements as `features`. `kind` contains {len(kind_)} " + f"element(s) and `features` contains {len(features)} element(s)." 
+ ) + + # convert features into a seq of int tuples + tmp_features, ice_for_two_way_pd = [], [] + for kind_plot, fxs in zip(kind_, features): + if isinstance(fxs, (numbers.Integral, str)): + fxs = (fxs,) + try: + fxs = tuple( + _get_feature_index(fx, feature_names=feature_names) for fx in fxs + ) + except TypeError as e: + raise ValueError( + "Each entry in features must be either an int, " + "a string, or an iterable of size at most 2." + ) from e + if not 1 <= np.size(fxs) <= 2: + raise ValueError( + "Each entry in features must be either an int, " + "a string, or an iterable of size at most 2." + ) + # store the information if 2-way PD was requested with ICE to later + # raise a ValueError with an exhaustive list of problematic + # settings. + ice_for_two_way_pd.append(kind_plot != "average" and np.size(fxs) > 1) + + tmp_features.append(fxs) + + if any(ice_for_two_way_pd): + # raise an error and be specific regarding the parameter values + # when 1- and 2-way PD were requested + kind_ = [ + "average" if forcing_average else kind_plot + for forcing_average, kind_plot in zip(ice_for_two_way_pd, kind_) + ] + raise ValueError( + "ICE plot cannot be rendered for 2-way feature interactions. " + "2-way feature interactions mandates PD plots using the " + "'average' kind: " + f"features={features!r} should be configured to use " + f"kind={kind_!r} explicitly." + ) + features = tmp_features + + if categorical_features is None: + is_categorical = [ + (False,) if len(fxs) == 1 else (False, False) for fxs in features + ] + else: + # we need to create a boolean indicator of which features are + # categorical from the categorical_features list. + categorical_features = np.asarray(categorical_features) + if categorical_features.dtype.kind == "b": + # categorical features provided as a list of boolean + if categorical_features.size != n_features: + raise ValueError( + "When `categorical_features` is a boolean array-like, " + "the array should be of shape (n_features,). 
Got " + f"{categorical_features.size} elements while `X` contains " + f"{n_features} features." + ) + is_categorical = [ + tuple(categorical_features[fx] for fx in fxs) for fxs in features + ] + elif categorical_features.dtype.kind in ("i", "O", "U"): + # categorical features provided as a list of indices or feature names + categorical_features_idx = [ + _get_feature_index(cat, feature_names=feature_names) + for cat in categorical_features + ] + is_categorical = [ + tuple([idx in categorical_features_idx for idx in fxs]) + for fxs in features + ] + else: + raise ValueError( + "Expected `categorical_features` to be an array-like of boolean," + f" integer, or string. Got {categorical_features.dtype} instead." + ) + + for cats in is_categorical: + if np.size(cats) == 2 and (cats[0] != cats[1]): + raise ValueError( + "Two-way partial dependence plots are not supported for pairs" + " of continuous and categorical features." + ) + + # collect the indices of the categorical features targeted by the partial + # dependence computation + categorical_features_targeted = set( + [ + fx + for fxs, cats in zip(features, is_categorical) + for fx in fxs + if any(cats) + ] + ) + if categorical_features_targeted: + min_n_cats = min( + [ + len(_unique(_safe_indexing(X, idx, axis=1))) + for idx in categorical_features_targeted + ] + ) + if grid_resolution < min_n_cats: + raise ValueError( + "The resolution of the computed grid is less than the " + "minimum number of categories in the targeted categorical " + "features. Expect the `grid_resolution` to be greater than " + f"{min_n_cats}. Got {grid_resolution} instead." + ) + + for is_cat, kind_plot in zip(is_categorical, kind_): + if any(is_cat) and kind_plot != "average": + raise ValueError( + "It is not possible to display individual effects for" + " categorical features." 
+ ) + + # Early exit if the axes does not have the correct number of axes + if ax is not None and not isinstance(ax, plt.Axes): + axes = np.asarray(ax, dtype=object) + if axes.size != len(features): + raise ValueError( + "Expected ax to have {} axes, got {}".format( + len(features), axes.size + ) + ) + + for i in chain.from_iterable(features): + if i >= len(feature_names): + raise ValueError( + "All entries of features must be less than " + "len(feature_names) = {0}, got {1}.".format(len(feature_names), i) + ) + + if isinstance(subsample, numbers.Integral): + if subsample <= 0: + raise ValueError( + f"When an integer, subsample={subsample} should be positive." + ) + elif isinstance(subsample, numbers.Real): + if subsample <= 0 or subsample >= 1: + raise ValueError( + f"When a floating-point, subsample={subsample} should be in " + "the (0, 1) range." + ) + + # compute predictions and/or averaged predictions + pd_results = Parallel(n_jobs=n_jobs, verbose=verbose)( + delayed(partial_dependence)( + estimator, + X, + fxs, + sample_weight=sample_weight, + feature_names=feature_names, + categorical_features=categorical_features, + response_method=response_method, + method=method, + grid_resolution=grid_resolution, + percentiles=percentiles, + kind=kind_plot, + ) + for kind_plot, fxs in zip(kind_, features) + ) + + # For multioutput regression, we can only check the validity of target + # now that we have the predictions. + # Also note: as multiclass-multioutput classifiers are not supported, + # multiclass and multioutput scenario are mutually exclusive. So there is + # no risk of overwriting target_idx here. 
+ pd_result = pd_results[0] # checking the first result is enough + n_tasks = ( + pd_result.average.shape[0] + if kind_[0] == "average" + else pd_result.individual.shape[0] + ) + if is_regressor(estimator) and n_tasks > 1: + if target is None: + raise ValueError("target must be specified for multi-output regressors") + if not 0 <= target <= n_tasks: + raise ValueError( + "target must be in [0, n_tasks], got {}.".format(target) + ) + target_idx = target + + deciles = {} + for fxs, cats in zip(features, is_categorical): + for fx, cat in zip(fxs, cats): + if not cat and fx not in deciles: + X_col = _safe_indexing(X, fx, axis=1) + deciles[fx] = mquantiles(X_col, prob=np.arange(0.1, 1.0, 0.1)) + + display = cls( + pd_results=pd_results, + features=features, + feature_names=feature_names, + target_idx=target_idx, + deciles=deciles, + kind=kind, + subsample=subsample, + random_state=random_state, + is_categorical=is_categorical, + ) + return display.plot( + ax=ax, + n_cols=n_cols, + line_kw=line_kw, + ice_lines_kw=ice_lines_kw, + pd_line_kw=pd_line_kw, + contour_kw=contour_kw, + centered=centered, + ) + + def _get_sample_count(self, n_samples): + """Compute the number of samples as an integer.""" + if isinstance(self.subsample, numbers.Integral): + if self.subsample < n_samples: + return self.subsample + return n_samples + elif isinstance(self.subsample, numbers.Real): + return ceil(n_samples * self.subsample) + return n_samples + + def _plot_ice_lines( + self, + preds, + feature_values, + n_ice_to_plot, + ax, + pd_plot_idx, + n_total_lines_by_plot, + individual_line_kw, + ): + """Plot the ICE lines. + + Parameters + ---------- + preds : ndarray of shape \ + (n_instances, n_grid_points) + The predictions computed for all points of `feature_values` for a + given feature for all samples in `X`. + feature_values : ndarray of shape (n_grid_points,) + The feature values for which the predictions have been computed. + n_ice_to_plot : int + The number of ICE lines to plot. 
+ ax : Matplotlib axes + The axis on which to plot the ICE lines. + pd_plot_idx : int + The sequential index of the plot. It will be unraveled to find the + matching 2D position in the grid layout. + n_total_lines_by_plot : int + The total number of lines expected to be plot on the axis. + individual_line_kw : dict + Dict with keywords passed when plotting the ICE lines. + """ + rng = check_random_state(self.random_state) + # subsample ice + ice_lines_idx = rng.choice( + preds.shape[0], + n_ice_to_plot, + replace=False, + ) + ice_lines_subsampled = preds[ice_lines_idx, :] + # plot the subsampled ice + for ice_idx, ice in enumerate(ice_lines_subsampled): + line_idx = np.unravel_index( + pd_plot_idx * n_total_lines_by_plot + ice_idx, self.lines_.shape + ) + self.lines_[line_idx] = ax.plot( + feature_values, ice.ravel(), **individual_line_kw + )[0] + + def _plot_average_dependence( + self, + avg_preds, + feature_values, + ax, + pd_line_idx, + line_kw, + categorical, + bar_kw, + ): + """Plot the average partial dependence. + + Parameters + ---------- + avg_preds : ndarray of shape (n_grid_points,) + The average predictions for all points of `feature_values` for a + given feature for all samples in `X`. + feature_values : ndarray of shape (n_grid_points,) + The feature values for which the predictions have been computed. + ax : Matplotlib axes + The axis on which to plot the average PD. + pd_line_idx : int + The sequential index of the plot. It will be unraveled to find the + matching 2D position in the grid layout. + line_kw : dict + Dict with keywords passed when plotting the PD plot. + categorical : bool + Whether feature is categorical. + bar_kw: dict + Dict with keywords passed when plotting the PD bars (categorical). 
+ """ + if categorical: + bar_idx = np.unravel_index(pd_line_idx, self.bars_.shape) + self.bars_[bar_idx] = ax.bar(feature_values, avg_preds, **bar_kw)[0] + ax.tick_params(axis="x", rotation=90) + else: + line_idx = np.unravel_index(pd_line_idx, self.lines_.shape) + self.lines_[line_idx] = ax.plot( + feature_values, + avg_preds, + **line_kw, + )[0] + + def _plot_one_way_partial_dependence( + self, + kind, + preds, + avg_preds, + feature_values, + feature_idx, + n_ice_lines, + ax, + n_cols, + pd_plot_idx, + n_lines, + ice_lines_kw, + pd_line_kw, + categorical, + bar_kw, + pdp_lim, + ): + """Plot 1-way partial dependence: ICE and PDP. + + Parameters + ---------- + kind : str + The kind of partial plot to draw. + preds : ndarray of shape \ + (n_instances, n_grid_points) or None + The predictions computed for all points of `feature_values` for a + given feature for all samples in `X`. + avg_preds : ndarray of shape (n_grid_points,) + The average predictions for all points of `feature_values` for a + given feature for all samples in `X`. + feature_values : ndarray of shape (n_grid_points,) + The feature values for which the predictions have been computed. + feature_idx : int + The index corresponding to the target feature. + n_ice_lines : int + The number of ICE lines to plot. + ax : Matplotlib axes + The axis on which to plot the ICE and PDP lines. + n_cols : int or None + The number of column in the axis. + pd_plot_idx : int + The sequential index of the plot. It will be unraveled to find the + matching 2D position in the grid layout. + n_lines : int + The total number of lines expected to be plot on the axis. + ice_lines_kw : dict + Dict with keywords passed when plotting the ICE lines. + pd_line_kw : dict + Dict with keywords passed when plotting the PD plot. + categorical : bool + Whether feature is categorical. + bar_kw: dict + Dict with keywords passed when plotting the PD bars (categorical). 
+ pdp_lim : dict + Global min and max average predictions, such that all plots will + have the same scale and y limits. `pdp_lim[1]` is the global min + and max for single partial dependence curves. + """ + from matplotlib import transforms # noqa + + if kind in ("individual", "both"): + self._plot_ice_lines( + preds[self.target_idx], + feature_values, + n_ice_lines, + ax, + pd_plot_idx, + n_lines, + ice_lines_kw, + ) + + if kind in ("average", "both"): + # the average is stored as the last line + if kind == "average": + pd_line_idx = pd_plot_idx + else: + pd_line_idx = pd_plot_idx * n_lines + n_ice_lines + self._plot_average_dependence( + avg_preds[self.target_idx].ravel(), + feature_values, + ax, + pd_line_idx, + pd_line_kw, + categorical, + bar_kw, + ) + + trans = transforms.blended_transform_factory(ax.transData, ax.transAxes) + # create the decile line for the vertical axis + vlines_idx = np.unravel_index(pd_plot_idx, self.deciles_vlines_.shape) + if self.deciles.get(feature_idx[0], None) is not None: + self.deciles_vlines_[vlines_idx] = ax.vlines( + self.deciles[feature_idx[0]], + 0, + 0.05, + transform=trans, + color="k", + ) + # reset ylim which was overwritten by vlines + min_val = min(val[0] for val in pdp_lim.values()) + max_val = max(val[1] for val in pdp_lim.values()) + ax.set_ylim([min_val, max_val]) + + # Set xlabel if it is not already set + if not ax.get_xlabel(): + ax.set_xlabel(self.feature_names[feature_idx[0]]) + + if n_cols is None or pd_plot_idx % n_cols == 0: + if not ax.get_ylabel(): + ax.set_ylabel("Partial dependence") + else: + ax.set_yticklabels([]) + + if pd_line_kw.get("label", None) and kind != "individual" and not categorical: + ax.legend() + + def _plot_two_way_partial_dependence( + self, + avg_preds, + feature_values, + feature_idx, + ax, + pd_plot_idx, + Z_level, + contour_kw, + categorical, + heatmap_kw, + ): + """Plot 2-way partial dependence. 
+ + Parameters + ---------- + avg_preds : ndarray of shape \ + (n_instances, n_grid_points, n_grid_points) + The average predictions for all points of `feature_values[0]` and + `feature_values[1]` for some given features for all samples in `X`. + feature_values : seq of 1d array + A sequence of array of the feature values for which the predictions + have been computed. + feature_idx : tuple of int + The indices of the target features + ax : Matplotlib axes + The axis on which to plot the ICE and PDP lines. + pd_plot_idx : int + The sequential index of the plot. It will be unraveled to find the + matching 2D position in the grid layout. + Z_level : ndarray of shape (8, 8) + The Z-level used to encode the average predictions. + contour_kw : dict + Dict with keywords passed when plotting the contours. + categorical : bool + Whether features are categorical. + heatmap_kw: dict + Dict with keywords passed when plotting the PD heatmap + (categorical). + """ + if categorical: + import matplotlib.pyplot as plt + + default_im_kw = dict(interpolation="nearest", cmap="viridis") + im_kw = {**default_im_kw, **heatmap_kw} + + data = avg_preds[self.target_idx] + im = ax.imshow(data, **im_kw) + text = None + cmap_min, cmap_max = im.cmap(0), im.cmap(1.0) + + text = np.empty_like(data, dtype=object) + # print text with appropriate color depending on background + thresh = (data.max() + data.min()) / 2.0 + + for flat_index in range(data.size): + row, col = np.unravel_index(flat_index, data.shape) + color = cmap_max if data[row, col] < thresh else cmap_min + + values_format = ".2f" + text_data = format(data[row, col], values_format) + + text_kwargs = dict(ha="center", va="center", color=color) + text[row, col] = ax.text(col, row, text_data, **text_kwargs) + + fig = ax.figure + fig.colorbar(im, ax=ax) + ax.set( + xticks=np.arange(len(feature_values[1])), + yticks=np.arange(len(feature_values[0])), + xticklabels=feature_values[1], + yticklabels=feature_values[0], + 
xlabel=self.feature_names[feature_idx[1]], + ylabel=self.feature_names[feature_idx[0]], + ) + + plt.setp(ax.get_xticklabels(), rotation="vertical") + + heatmap_idx = np.unravel_index(pd_plot_idx, self.heatmaps_.shape) + self.heatmaps_[heatmap_idx] = im + else: + from matplotlib import transforms # noqa + + XX, YY = np.meshgrid(feature_values[0], feature_values[1]) + Z = avg_preds[self.target_idx].T + CS = ax.contour(XX, YY, Z, levels=Z_level, linewidths=0.5, colors="k") + contour_idx = np.unravel_index(pd_plot_idx, self.contours_.shape) + self.contours_[contour_idx] = ax.contourf( + XX, + YY, + Z, + levels=Z_level, + vmax=Z_level[-1], + vmin=Z_level[0], + **contour_kw, + ) + ax.clabel(CS, fmt="%2.2f", colors="k", fontsize=10, inline=True) + + trans = transforms.blended_transform_factory(ax.transData, ax.transAxes) + # create the decile line for the vertical axis + xlim, ylim = ax.get_xlim(), ax.get_ylim() + vlines_idx = np.unravel_index(pd_plot_idx, self.deciles_vlines_.shape) + self.deciles_vlines_[vlines_idx] = ax.vlines( + self.deciles[feature_idx[0]], + 0, + 0.05, + transform=trans, + color="k", + ) + # create the decile line for the horizontal axis + hlines_idx = np.unravel_index(pd_plot_idx, self.deciles_hlines_.shape) + self.deciles_hlines_[hlines_idx] = ax.hlines( + self.deciles[feature_idx[1]], + 0, + 0.05, + transform=trans, + color="k", + ) + # reset xlim and ylim since they are overwritten by hlines and + # vlines + ax.set_xlim(xlim) + ax.set_ylim(ylim) + + # set xlabel if it is not already set + if not ax.get_xlabel(): + ax.set_xlabel(self.feature_names[feature_idx[0]]) + ax.set_ylabel(self.feature_names[feature_idx[1]]) + + def plot( + self, + *, + ax=None, + n_cols=3, + line_kw=None, + ice_lines_kw=None, + pd_line_kw=None, + contour_kw=None, + bar_kw=None, + heatmap_kw=None, + pdp_lim=None, + centered=False, + ): + """Plot partial dependence plots. 
+ + Parameters + ---------- + ax : Matplotlib axes or array-like of Matplotlib axes, default=None + - If a single axis is passed in, it is treated as a bounding axes + and a grid of partial dependence plots will be drawn within + these bounds. The `n_cols` parameter controls the number of + columns in the grid. + - If an array-like of axes are passed in, the partial dependence + plots will be drawn directly into these axes. + - If `None`, a figure and a bounding axes is created and treated + as the single axes case. + + n_cols : int, default=3 + The maximum number of columns in the grid plot. Only active when + `ax` is a single axes or `None`. + + line_kw : dict, default=None + Dict with keywords passed to the `matplotlib.pyplot.plot` call. + For one-way partial dependence plots. + + ice_lines_kw : dict, default=None + Dictionary with keywords passed to the `matplotlib.pyplot.plot` call. + For ICE lines in the one-way partial dependence plots. + The key value pairs defined in `ice_lines_kw` takes priority over + `line_kw`. + + .. versionadded:: 1.0 + + pd_line_kw : dict, default=None + Dictionary with keywords passed to the `matplotlib.pyplot.plot` call. + For partial dependence in one-way partial dependence plots. + The key value pairs defined in `pd_line_kw` takes priority over + `line_kw`. + + .. versionadded:: 1.0 + + contour_kw : dict, default=None + Dict with keywords passed to the `matplotlib.pyplot.contourf` + call for two-way partial dependence plots. + + bar_kw : dict, default=None + Dict with keywords passed to the `matplotlib.pyplot.bar` + call for one-way categorical partial dependence plots. + + .. versionadded:: 1.2 + + heatmap_kw : dict, default=None + Dict with keywords passed to the `matplotlib.pyplot.imshow` + call for two-way categorical partial dependence plots. + + .. versionadded:: 1.2 + + pdp_lim : dict, default=None + Global min and max average predictions, such that all plots will have the + same scale and y limits. 
`pdp_lim[1]` is the global min and max for single + partial dependence curves. `pdp_lim[2]` is the global min and max for + two-way partial dependence curves. If `None` (default), the limit will be + inferred from the global minimum and maximum of all predictions. + + .. versionadded:: 1.1 + + centered : bool, default=False + If `True`, the ICE and PD lines will start at the origin of the + y-axis. By default, no centering is done. + + .. versionadded:: 1.1 + + Returns + ------- + display : :class:`~sklearn.inspection.PartialDependenceDisplay` + Returns a :class:`~sklearn.inspection.PartialDependenceDisplay` + object that contains the partial dependence plots. + """ + + check_matplotlib_support("plot_partial_dependence") + import matplotlib.pyplot as plt # noqa + from matplotlib.gridspec import GridSpecFromSubplotSpec # noqa + + if isinstance(self.kind, str): + kind = [self.kind] * len(self.features) + else: + kind = self.kind + + if self.is_categorical is None: + is_categorical = [ + (False,) if len(fx) == 1 else (False, False) for fx in self.features + ] + else: + is_categorical = self.is_categorical + + if len(kind) != len(self.features): + raise ValueError( + "When `kind` is provided as a list of strings, it should " + "contain as many elements as `features`. `kind` contains " + f"{len(kind)} element(s) and `features` contains " + f"{len(self.features)} element(s)." + ) + + valid_kinds = {"average", "individual", "both"} + if any([k not in valid_kinds for k in kind]): + raise ValueError( + f"Values provided to `kind` must be one of: {valid_kinds!r} or a list" + f" of such values. 
Currently, kind={self.kind!r}" + ) + + # Center results before plotting + if not centered: + pd_results_ = self.pd_results + else: + pd_results_ = [] + for kind_plot, pd_result in zip(kind, self.pd_results): + current_results = {"grid_values": pd_result["grid_values"]} + + if kind_plot in ("individual", "both"): + preds = pd_result.individual + preds = preds - preds[self.target_idx, :, 0, None] + current_results["individual"] = preds + + if kind_plot in ("average", "both"): + avg_preds = pd_result.average + avg_preds = avg_preds - avg_preds[self.target_idx, 0, None] + current_results["average"] = avg_preds + + pd_results_.append(Bunch(**current_results)) + + if pdp_lim is None: + # get global min and max average predictions of PD grouped by plot type + pdp_lim = {} + for kind_plot, pdp in zip(kind, pd_results_): + values = pdp["grid_values"] + preds = pdp.average if kind_plot == "average" else pdp.individual + min_pd = preds[self.target_idx].min() + max_pd = preds[self.target_idx].max() + + # expand the limits to account so that the plotted lines do not touch + # the edges of the plot + span = max_pd - min_pd + min_pd -= 0.05 * span + max_pd += 0.05 * span + + n_fx = len(values) + old_min_pd, old_max_pd = pdp_lim.get(n_fx, (min_pd, max_pd)) + min_pd = min(min_pd, old_min_pd) + max_pd = max(max_pd, old_max_pd) + pdp_lim[n_fx] = (min_pd, max_pd) + + if line_kw is None: + line_kw = {} + if ice_lines_kw is None: + ice_lines_kw = {} + if pd_line_kw is None: + pd_line_kw = {} + if bar_kw is None: + bar_kw = {} + if heatmap_kw is None: + heatmap_kw = {} + + if ax is None: + _, ax = plt.subplots() + + if contour_kw is None: + contour_kw = {} + default_contour_kws = {"alpha": 0.75} + contour_kw = {**default_contour_kws, **contour_kw} + + n_features = len(self.features) + is_average_plot = [kind_plot == "average" for kind_plot in kind] + if all(is_average_plot): + # only average plots are requested + n_ice_lines = 0 + n_lines = 1 + else: + # we need to determine the number 
of ICE samples computed + ice_plot_idx = is_average_plot.index(False) + n_ice_lines = self._get_sample_count( + len(pd_results_[ice_plot_idx].individual[0]) + ) + if any([kind_plot == "both" for kind_plot in kind]): + n_lines = n_ice_lines + 1 # account for the average line + else: + n_lines = n_ice_lines + + if isinstance(ax, plt.Axes): + # If ax was set off, it has most likely been set to off + # by a previous call to plot. + if not ax.axison: + raise ValueError( + "The ax was already used in another plot " + "function, please set ax=display.axes_ " + "instead" + ) + + ax.set_axis_off() + self.bounding_ax_ = ax + self.figure_ = ax.figure + + n_cols = min(n_cols, n_features) + n_rows = int(np.ceil(n_features / float(n_cols))) + + self.axes_ = np.empty((n_rows, n_cols), dtype=object) + if all(is_average_plot): + self.lines_ = np.empty((n_rows, n_cols), dtype=object) + else: + self.lines_ = np.empty((n_rows, n_cols, n_lines), dtype=object) + self.contours_ = np.empty((n_rows, n_cols), dtype=object) + self.bars_ = np.empty((n_rows, n_cols), dtype=object) + self.heatmaps_ = np.empty((n_rows, n_cols), dtype=object) + + axes_ravel = self.axes_.ravel() + + gs = GridSpecFromSubplotSpec( + n_rows, n_cols, subplot_spec=ax.get_subplotspec() + ) + for i, spec in zip(range(n_features), gs): + axes_ravel[i] = self.figure_.add_subplot(spec) + + else: # array-like + ax = np.asarray(ax, dtype=object) + if ax.size != n_features: + raise ValueError( + "Expected ax to have {} axes, got {}".format(n_features, ax.size) + ) + + if ax.ndim == 2: + n_cols = ax.shape[1] + else: + n_cols = None + + self.bounding_ax_ = None + self.figure_ = ax.ravel()[0].figure + self.axes_ = ax + if all(is_average_plot): + self.lines_ = np.empty_like(ax, dtype=object) + else: + self.lines_ = np.empty(ax.shape + (n_lines,), dtype=object) + self.contours_ = np.empty_like(ax, dtype=object) + self.bars_ = np.empty_like(ax, dtype=object) + self.heatmaps_ = np.empty_like(ax, dtype=object) + + # create contour 
levels for two-way plots + if 2 in pdp_lim: + Z_level = np.linspace(*pdp_lim[2], num=8) + + self.deciles_vlines_ = np.empty_like(self.axes_, dtype=object) + self.deciles_hlines_ = np.empty_like(self.axes_, dtype=object) + + for pd_plot_idx, (axi, feature_idx, cat, pd_result, kind_plot) in enumerate( + zip( + self.axes_.ravel(), + self.features, + is_categorical, + pd_results_, + kind, + ) + ): + avg_preds = None + preds = None + feature_values = pd_result["grid_values"] + if kind_plot == "individual": + preds = pd_result.individual + elif kind_plot == "average": + avg_preds = pd_result.average + else: # kind_plot == 'both' + avg_preds = pd_result.average + preds = pd_result.individual + + if len(feature_values) == 1: + # define the line-style for the current plot + default_line_kws = { + "color": "C0", + "label": "average" if kind_plot == "both" else None, + } + if kind_plot == "individual": + default_ice_lines_kws = {"alpha": 0.3, "linewidth": 0.5} + default_pd_lines_kws = {} + elif kind_plot == "both": + # by default, we need to distinguish the average line from + # the individual lines via color and line style + default_ice_lines_kws = { + "alpha": 0.3, + "linewidth": 0.5, + "color": "tab:blue", + } + default_pd_lines_kws = { + "color": "tab:orange", + "linestyle": "--", + } + else: + default_ice_lines_kws = {} + default_pd_lines_kws = {} + + ice_lines_kw = { + **default_line_kws, + **default_ice_lines_kws, + **line_kw, + **ice_lines_kw, + } + del ice_lines_kw["label"] + + pd_line_kw = { + **default_line_kws, + **default_pd_lines_kws, + **line_kw, + **pd_line_kw, + } + + default_bar_kws = {"color": "C0"} + bar_kw = {**default_bar_kws, **bar_kw} + + default_heatmap_kw = {} + heatmap_kw = {**default_heatmap_kw, **heatmap_kw} + + self._plot_one_way_partial_dependence( + kind_plot, + preds, + avg_preds, + feature_values[0], + feature_idx, + n_ice_lines, + axi, + n_cols, + pd_plot_idx, + n_lines, + ice_lines_kw, + pd_line_kw, + cat[0], + bar_kw, + pdp_lim, + ) + 
else: + self._plot_two_way_partial_dependence( + avg_preds, + feature_values, + feature_idx, + axi, + pd_plot_idx, + Z_level, + contour_kw, + cat[0] and cat[1], + heatmap_kw, + ) + + return self diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/inspection/_plot/tests/__init__.py b/env-llmeval/lib/python3.10/site-packages/sklearn/inspection/_plot/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/inspection/_plot/tests/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/inspection/_plot/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0db393c4bceb6c82d8e6c6b480677efb64b39309 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/inspection/_plot/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/inspection/_plot/tests/__pycache__/test_boundary_decision_display.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/inspection/_plot/tests/__pycache__/test_boundary_decision_display.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8c6ea87191fa80a2598ab8eeb407f05370199e43 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/inspection/_plot/tests/__pycache__/test_boundary_decision_display.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/inspection/_plot/tests/__pycache__/test_plot_partial_dependence.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/inspection/_plot/tests/__pycache__/test_plot_partial_dependence.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3c7c26ece9d4d1193871652ad74745eced68370e Binary files /dev/null and 
b/env-llmeval/lib/python3.10/site-packages/sklearn/inspection/_plot/tests/__pycache__/test_plot_partial_dependence.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/inspection/_plot/tests/test_boundary_decision_display.py b/env-llmeval/lib/python3.10/site-packages/sklearn/inspection/_plot/tests/test_boundary_decision_display.py new file mode 100644 index 0000000000000000000000000000000000000000..7bb38f55445a08a28c1415db8a8b02cd2bd4c2dd --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/inspection/_plot/tests/test_boundary_decision_display.py @@ -0,0 +1,609 @@ +import warnings + +import numpy as np +import pytest + +from sklearn.base import BaseEstimator, ClassifierMixin +from sklearn.datasets import ( + load_diabetes, + load_iris, + make_classification, + make_multilabel_classification, +) +from sklearn.ensemble import IsolationForest +from sklearn.inspection import DecisionBoundaryDisplay +from sklearn.inspection._plot.decision_boundary import _check_boundary_response_method +from sklearn.linear_model import LogisticRegression +from sklearn.preprocessing import scale +from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor +from sklearn.utils._testing import ( + assert_allclose, + assert_array_equal, +) + +# TODO: Remove when https://github.com/numpy/numpy/issues/14397 is resolved +pytestmark = pytest.mark.filterwarnings( + "ignore:In future, it will be an error for 'np.bool_':DeprecationWarning:" + "matplotlib.*" +) + + +X, y = make_classification( + n_informative=1, + n_redundant=1, + n_clusters_per_class=1, + n_features=2, + random_state=42, +) + + +def load_iris_2d_scaled(): + X, y = load_iris(return_X_y=True) + X = scale(X)[:, :2] + return X, y + + +@pytest.fixture(scope="module") +def fitted_clf(): + return LogisticRegression().fit(X, y) + + +def test_input_data_dimension(pyplot): + """Check that we raise an error when `X` does not have exactly 2 features.""" + X, y = 
make_classification(n_samples=10, n_features=4, random_state=0) + + clf = LogisticRegression().fit(X, y) + msg = "n_features must be equal to 2. Got 4 instead." + with pytest.raises(ValueError, match=msg): + DecisionBoundaryDisplay.from_estimator(estimator=clf, X=X) + + +def test_check_boundary_response_method_error(): + """Check that we raise an error for the cases not supported by + `_check_boundary_response_method`. + """ + + class MultiLabelClassifier: + classes_ = [np.array([0, 1]), np.array([0, 1])] + + err_msg = "Multi-label and multi-output multi-class classifiers are not supported" + with pytest.raises(ValueError, match=err_msg): + _check_boundary_response_method(MultiLabelClassifier(), "predict", None) + + class MulticlassClassifier: + classes_ = [0, 1, 2] + + err_msg = "Multiclass classifiers are only supported when `response_method` is" + for response_method in ("predict_proba", "decision_function"): + with pytest.raises(ValueError, match=err_msg): + _check_boundary_response_method( + MulticlassClassifier(), response_method, None + ) + + +@pytest.mark.parametrize( + "estimator, response_method, class_of_interest, expected_prediction_method", + [ + (DecisionTreeRegressor(), "predict", None, "predict"), + (DecisionTreeRegressor(), "auto", None, "predict"), + (LogisticRegression().fit(*load_iris_2d_scaled()), "predict", None, "predict"), + (LogisticRegression().fit(*load_iris_2d_scaled()), "auto", None, "predict"), + ( + LogisticRegression().fit(*load_iris_2d_scaled()), + "predict_proba", + 0, + "predict_proba", + ), + ( + LogisticRegression().fit(*load_iris_2d_scaled()), + "decision_function", + 0, + "decision_function", + ), + ( + LogisticRegression().fit(X, y), + "auto", + None, + ["decision_function", "predict_proba", "predict"], + ), + (LogisticRegression().fit(X, y), "predict", None, "predict"), + ( + LogisticRegression().fit(X, y), + ["predict_proba", "decision_function"], + None, + ["predict_proba", "decision_function"], + ), + ], +) +def 
test_check_boundary_response_method( + estimator, response_method, class_of_interest, expected_prediction_method +): + """Check the behaviour of `_check_boundary_response_method` for the supported + cases. + """ + prediction_method = _check_boundary_response_method( + estimator, response_method, class_of_interest + ) + assert prediction_method == expected_prediction_method + + +@pytest.mark.parametrize("response_method", ["predict_proba", "decision_function"]) +def test_multiclass_error(pyplot, response_method): + """Check multiclass errors.""" + X, y = make_classification(n_classes=3, n_informative=3, random_state=0) + X = X[:, [0, 1]] + lr = LogisticRegression().fit(X, y) + + msg = ( + "Multiclass classifiers are only supported when `response_method` is 'predict'" + " or 'auto'" + ) + with pytest.raises(ValueError, match=msg): + DecisionBoundaryDisplay.from_estimator(lr, X, response_method=response_method) + + +@pytest.mark.parametrize("response_method", ["auto", "predict"]) +def test_multiclass(pyplot, response_method): + """Check multiclass gives expected results.""" + grid_resolution = 10 + eps = 1.0 + X, y = make_classification(n_classes=3, n_informative=3, random_state=0) + X = X[:, [0, 1]] + lr = LogisticRegression(random_state=0).fit(X, y) + + disp = DecisionBoundaryDisplay.from_estimator( + lr, X, response_method=response_method, grid_resolution=grid_resolution, eps=1.0 + ) + + x0_min, x0_max = X[:, 0].min() - eps, X[:, 0].max() + eps + x1_min, x1_max = X[:, 1].min() - eps, X[:, 1].max() + eps + xx0, xx1 = np.meshgrid( + np.linspace(x0_min, x0_max, grid_resolution), + np.linspace(x1_min, x1_max, grid_resolution), + ) + response = lr.predict(np.c_[xx0.ravel(), xx1.ravel()]) + assert_allclose(disp.response, response.reshape(xx0.shape)) + assert_allclose(disp.xx0, xx0) + assert_allclose(disp.xx1, xx1) + + +@pytest.mark.parametrize( + "kwargs, error_msg", + [ + ( + {"plot_method": "hello_world"}, + r"plot_method must be one of contourf, contour, pcolormesh. 
Got hello_world" + r" instead.", + ), + ( + {"grid_resolution": 1}, + r"grid_resolution must be greater than 1. Got 1 instead", + ), + ( + {"grid_resolution": -1}, + r"grid_resolution must be greater than 1. Got -1 instead", + ), + ({"eps": -1.1}, r"eps must be greater than or equal to 0. Got -1.1 instead"), + ], +) +def test_input_validation_errors(pyplot, kwargs, error_msg, fitted_clf): + """Check input validation from_estimator.""" + with pytest.raises(ValueError, match=error_msg): + DecisionBoundaryDisplay.from_estimator(fitted_clf, X, **kwargs) + + +def test_display_plot_input_error(pyplot, fitted_clf): + """Check input validation for `plot`.""" + disp = DecisionBoundaryDisplay.from_estimator(fitted_clf, X, grid_resolution=5) + + with pytest.raises(ValueError, match="plot_method must be 'contourf'"): + disp.plot(plot_method="hello_world") + + +@pytest.mark.parametrize( + "response_method", ["auto", "predict", "predict_proba", "decision_function"] +) +@pytest.mark.parametrize("plot_method", ["contourf", "contour"]) +def test_decision_boundary_display_classifier( + pyplot, fitted_clf, response_method, plot_method +): + """Check that decision boundary is correct.""" + fig, ax = pyplot.subplots() + eps = 2.0 + disp = DecisionBoundaryDisplay.from_estimator( + fitted_clf, + X, + grid_resolution=5, + response_method=response_method, + plot_method=plot_method, + eps=eps, + ax=ax, + ) + assert isinstance(disp.surface_, pyplot.matplotlib.contour.QuadContourSet) + assert disp.ax_ == ax + assert disp.figure_ == fig + + x0, x1 = X[:, 0], X[:, 1] + + x0_min, x0_max = x0.min() - eps, x0.max() + eps + x1_min, x1_max = x1.min() - eps, x1.max() + eps + + assert disp.xx0.min() == pytest.approx(x0_min) + assert disp.xx0.max() == pytest.approx(x0_max) + assert disp.xx1.min() == pytest.approx(x1_min) + assert disp.xx1.max() == pytest.approx(x1_max) + + fig2, ax2 = pyplot.subplots() + # change plotting method for second plot + disp.plot(plot_method="pcolormesh", ax=ax2, 
shading="auto") + assert isinstance(disp.surface_, pyplot.matplotlib.collections.QuadMesh) + assert disp.ax_ == ax2 + assert disp.figure_ == fig2 + + +@pytest.mark.parametrize("response_method", ["auto", "predict", "decision_function"]) +@pytest.mark.parametrize("plot_method", ["contourf", "contour"]) +def test_decision_boundary_display_outlier_detector( + pyplot, response_method, plot_method +): + """Check that decision boundary is correct for outlier detector.""" + fig, ax = pyplot.subplots() + eps = 2.0 + outlier_detector = IsolationForest(random_state=0).fit(X, y) + disp = DecisionBoundaryDisplay.from_estimator( + outlier_detector, + X, + grid_resolution=5, + response_method=response_method, + plot_method=plot_method, + eps=eps, + ax=ax, + ) + assert isinstance(disp.surface_, pyplot.matplotlib.contour.QuadContourSet) + assert disp.ax_ == ax + assert disp.figure_ == fig + + x0, x1 = X[:, 0], X[:, 1] + + x0_min, x0_max = x0.min() - eps, x0.max() + eps + x1_min, x1_max = x1.min() - eps, x1.max() + eps + + assert disp.xx0.min() == pytest.approx(x0_min) + assert disp.xx0.max() == pytest.approx(x0_max) + assert disp.xx1.min() == pytest.approx(x1_min) + assert disp.xx1.max() == pytest.approx(x1_max) + + +@pytest.mark.parametrize("response_method", ["auto", "predict"]) +@pytest.mark.parametrize("plot_method", ["contourf", "contour"]) +def test_decision_boundary_display_regressor(pyplot, response_method, plot_method): + """Check that we can display the decision boundary for a regressor.""" + X, y = load_diabetes(return_X_y=True) + X = X[:, :2] + tree = DecisionTreeRegressor().fit(X, y) + fig, ax = pyplot.subplots() + eps = 2.0 + disp = DecisionBoundaryDisplay.from_estimator( + tree, + X, + response_method=response_method, + ax=ax, + eps=eps, + plot_method=plot_method, + ) + assert isinstance(disp.surface_, pyplot.matplotlib.contour.QuadContourSet) + assert disp.ax_ == ax + assert disp.figure_ == fig + + x0, x1 = X[:, 0], X[:, 1] + + x0_min, x0_max = x0.min() - eps, 
x0.max() + eps + x1_min, x1_max = x1.min() - eps, x1.max() + eps + + assert disp.xx0.min() == pytest.approx(x0_min) + assert disp.xx0.max() == pytest.approx(x0_max) + assert disp.xx1.min() == pytest.approx(x1_min) + assert disp.xx1.max() == pytest.approx(x1_max) + + fig2, ax2 = pyplot.subplots() + # change plotting method for second plot + disp.plot(plot_method="pcolormesh", ax=ax2, shading="auto") + assert isinstance(disp.surface_, pyplot.matplotlib.collections.QuadMesh) + assert disp.ax_ == ax2 + assert disp.figure_ == fig2 + + +@pytest.mark.parametrize( + "response_method, msg", + [ + ( + "predict_proba", + "MyClassifier has none of the following attributes: predict_proba", + ), + ( + "decision_function", + "MyClassifier has none of the following attributes: decision_function", + ), + ( + "auto", + ( + "MyClassifier has none of the following attributes: decision_function, " + "predict_proba, predict" + ), + ), + ( + "bad_method", + "MyClassifier has none of the following attributes: bad_method", + ), + ], +) +def test_error_bad_response(pyplot, response_method, msg): + """Check errors for bad response.""" + + class MyClassifier(BaseEstimator, ClassifierMixin): + def fit(self, X, y): + self.fitted_ = True + self.classes_ = [0, 1] + return self + + clf = MyClassifier().fit(X, y) + + with pytest.raises(AttributeError, match=msg): + DecisionBoundaryDisplay.from_estimator(clf, X, response_method=response_method) + + +@pytest.mark.parametrize("response_method", ["auto", "predict", "predict_proba"]) +def test_multilabel_classifier_error(pyplot, response_method): + """Check that multilabel classifier raises correct error.""" + X, y = make_multilabel_classification(random_state=0) + X = X[:, :2] + tree = DecisionTreeClassifier().fit(X, y) + + msg = "Multi-label and multi-output multi-class classifiers are not supported" + with pytest.raises(ValueError, match=msg): + DecisionBoundaryDisplay.from_estimator( + tree, + X, + response_method=response_method, + ) + + 
+@pytest.mark.parametrize("response_method", ["auto", "predict", "predict_proba"]) +def test_multi_output_multi_class_classifier_error(pyplot, response_method): + """Check that multi-output multi-class classifier raises correct error.""" + X = np.asarray([[0, 1], [1, 2]]) + y = np.asarray([["tree", "cat"], ["cat", "tree"]]) + tree = DecisionTreeClassifier().fit(X, y) + + msg = "Multi-label and multi-output multi-class classifiers are not supported" + with pytest.raises(ValueError, match=msg): + DecisionBoundaryDisplay.from_estimator( + tree, + X, + response_method=response_method, + ) + + +def test_multioutput_regressor_error(pyplot): + """Check that multioutput regressor raises correct error.""" + X = np.asarray([[0, 1], [1, 2]]) + y = np.asarray([[0, 1], [4, 1]]) + tree = DecisionTreeRegressor().fit(X, y) + with pytest.raises(ValueError, match="Multi-output regressors are not supported"): + DecisionBoundaryDisplay.from_estimator(tree, X, response_method="predict") + + +@pytest.mark.parametrize( + "response_method", + ["predict_proba", "decision_function", ["predict_proba", "predict"]], +) +def test_regressor_unsupported_response(pyplot, response_method): + """Check that we can display the decision boundary for a regressor.""" + X, y = load_diabetes(return_X_y=True) + X = X[:, :2] + tree = DecisionTreeRegressor().fit(X, y) + err_msg = "should either be a classifier to be used with response_method" + with pytest.raises(ValueError, match=err_msg): + DecisionBoundaryDisplay.from_estimator(tree, X, response_method=response_method) + + +@pytest.mark.filterwarnings( + # We expect to raise the following warning because the classifier is fit on a + # NumPy array + "ignore:X has feature names, but LogisticRegression was fitted without" +) +def test_dataframe_labels_used(pyplot, fitted_clf): + """Check that column names are used for pandas.""" + pd = pytest.importorskip("pandas") + df = pd.DataFrame(X, columns=["col_x", "col_y"]) + + # pandas column names are used by 
default + _, ax = pyplot.subplots() + disp = DecisionBoundaryDisplay.from_estimator(fitted_clf, df, ax=ax) + assert ax.get_xlabel() == "col_x" + assert ax.get_ylabel() == "col_y" + + # second call to plot will have the names + fig, ax = pyplot.subplots() + disp.plot(ax=ax) + assert ax.get_xlabel() == "col_x" + assert ax.get_ylabel() == "col_y" + + # axes with a label will not get overridden + fig, ax = pyplot.subplots() + ax.set(xlabel="hello", ylabel="world") + disp.plot(ax=ax) + assert ax.get_xlabel() == "hello" + assert ax.get_ylabel() == "world" + + # labels get overridden only if provided to the `plot` method + disp.plot(ax=ax, xlabel="overwritten_x", ylabel="overwritten_y") + assert ax.get_xlabel() == "overwritten_x" + assert ax.get_ylabel() == "overwritten_y" + + # labels do not get inferred if provided to `from_estimator` + _, ax = pyplot.subplots() + disp = DecisionBoundaryDisplay.from_estimator( + fitted_clf, df, ax=ax, xlabel="overwritten_x", ylabel="overwritten_y" + ) + assert ax.get_xlabel() == "overwritten_x" + assert ax.get_ylabel() == "overwritten_y" + + +def test_string_target(pyplot): + """Check that decision boundary works with classifiers trained on string labels.""" + iris = load_iris() + X = iris.data[:, [0, 1]] + + # Use strings as target + y = iris.target_names[iris.target] + log_reg = LogisticRegression().fit(X, y) + + # Does not raise + DecisionBoundaryDisplay.from_estimator( + log_reg, + X, + grid_resolution=5, + response_method="predict", + ) + + +def test_dataframe_support(pyplot): + """Check that passing a dataframe at fit and to the Display does not + raise warnings. 
+ + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/23311 + """ + pd = pytest.importorskip("pandas") + df = pd.DataFrame(X, columns=["col_x", "col_y"]) + estimator = LogisticRegression().fit(df, y) + + with warnings.catch_warnings(): + # no warnings linked to feature names validation should be raised + warnings.simplefilter("error", UserWarning) + DecisionBoundaryDisplay.from_estimator(estimator, df, response_method="predict") + + +@pytest.mark.parametrize("response_method", ["predict_proba", "decision_function"]) +def test_class_of_interest_binary(pyplot, response_method): + """Check the behaviour of passing `class_of_interest` for plotting the output of + `predict_proba` and `decision_function` in the binary case. + """ + iris = load_iris() + X = iris.data[:100, :2] + y = iris.target[:100] + assert_array_equal(np.unique(y), [0, 1]) + + estimator = LogisticRegression().fit(X, y) + # We will check that `class_of_interest=None` is equivalent to + # `class_of_interest=estimator.classes_[1]` + disp_default = DecisionBoundaryDisplay.from_estimator( + estimator, + X, + response_method=response_method, + class_of_interest=None, + ) + disp_class_1 = DecisionBoundaryDisplay.from_estimator( + estimator, + X, + response_method=response_method, + class_of_interest=estimator.classes_[1], + ) + + assert_allclose(disp_default.response, disp_class_1.response) + + # we can check that `_get_response_values` modifies the response when targeting + # the other class, i.e. 1 - p(y=1|x) for `predict_proba` and -decision_function + # for `decision_function`. 
+ disp_class_0 = DecisionBoundaryDisplay.from_estimator( + estimator, + X, + response_method=response_method, + class_of_interest=estimator.classes_[0], + ) + + if response_method == "predict_proba": + assert_allclose(disp_default.response, 1 - disp_class_0.response) + else: + assert response_method == "decision_function" + assert_allclose(disp_default.response, -disp_class_0.response) + + +@pytest.mark.parametrize("response_method", ["predict_proba", "decision_function"]) +def test_class_of_interest_multiclass(pyplot, response_method): + """Check the behaviour of passing `class_of_interest` for plotting the output of + `predict_proba` and `decision_function` in the multiclass case. + """ + iris = load_iris() + X = iris.data[:, :2] + y = iris.target # the target are numerical labels + class_of_interest_idx = 2 + + estimator = LogisticRegression().fit(X, y) + disp = DecisionBoundaryDisplay.from_estimator( + estimator, + X, + response_method=response_method, + class_of_interest=class_of_interest_idx, + ) + + # we will check that we plot the expected values as response + grid = np.concatenate([disp.xx0.reshape(-1, 1), disp.xx1.reshape(-1, 1)], axis=1) + response = getattr(estimator, response_method)(grid)[:, class_of_interest_idx] + assert_allclose(response.reshape(*disp.response.shape), disp.response) + + # make the same test but this time using target as strings + y = iris.target_names[iris.target] + estimator = LogisticRegression().fit(X, y) + + disp = DecisionBoundaryDisplay.from_estimator( + estimator, + X, + response_method=response_method, + class_of_interest=iris.target_names[class_of_interest_idx], + ) + + grid = np.concatenate([disp.xx0.reshape(-1, 1), disp.xx1.reshape(-1, 1)], axis=1) + response = getattr(estimator, response_method)(grid)[:, class_of_interest_idx] + assert_allclose(response.reshape(*disp.response.shape), disp.response) + + # check that we raise an error for unknown labels + # this test should already be handled in `_get_response_values` but 
we can have this + # test here as well + err_msg = "class_of_interest=2 is not a valid label: It should be one of" + with pytest.raises(ValueError, match=err_msg): + DecisionBoundaryDisplay.from_estimator( + estimator, + X, + response_method=response_method, + class_of_interest=class_of_interest_idx, + ) + + # TODO: remove this test when we handle multiclass with class_of_interest=None + # by showing the max of the decision function or the max of the predicted + # probabilities. + err_msg = "Multiclass classifiers are only supported" + with pytest.raises(ValueError, match=err_msg): + DecisionBoundaryDisplay.from_estimator( + estimator, + X, + response_method=response_method, + class_of_interest=None, + ) + + +def test_subclass_named_constructors_return_type_is_subclass(pyplot): + """Check that named constructors return the correct type when subclassed. + + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/pull/27675 + """ + clf = LogisticRegression().fit(X, y) + + class SubclassOfDisplay(DecisionBoundaryDisplay): + pass + + curve = SubclassOfDisplay.from_estimator(estimator=clf, X=X) + + assert isinstance(curve, SubclassOfDisplay) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/inspection/_plot/tests/test_plot_partial_dependence.py b/env-llmeval/lib/python3.10/site-packages/sklearn/inspection/_plot/tests/test_plot_partial_dependence.py new file mode 100644 index 0000000000000000000000000000000000000000..57fc68d07e887fa18b8c84c08780a77e6712d843 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/inspection/_plot/tests/test_plot_partial_dependence.py @@ -0,0 +1,1140 @@ +import numpy as np +import pytest +from numpy.testing import assert_allclose +from scipy.stats.mstats import mquantiles + +from sklearn.compose import make_column_transformer +from sklearn.datasets import ( + load_diabetes, + load_iris, + make_classification, + make_regression, +) +from sklearn.ensemble import GradientBoostingClassifier, 
GradientBoostingRegressor +from sklearn.inspection import PartialDependenceDisplay +from sklearn.linear_model import LinearRegression +from sklearn.pipeline import make_pipeline +from sklearn.preprocessing import OneHotEncoder +from sklearn.utils._testing import _convert_container + +# TODO: Remove when https://github.com/numpy/numpy/issues/14397 is resolved +pytestmark = pytest.mark.filterwarnings( + ( + "ignore:In future, it will be an error for 'np.bool_':DeprecationWarning:" + "matplotlib.*" + ), +) + + +@pytest.fixture(scope="module") +def diabetes(): + # diabetes dataset, subsampled for speed + data = load_diabetes() + data.data = data.data[:50] + data.target = data.target[:50] + return data + + +@pytest.fixture(scope="module") +def clf_diabetes(diabetes): + clf = GradientBoostingRegressor(n_estimators=10, random_state=1) + clf.fit(diabetes.data, diabetes.target) + return clf + + +@pytest.mark.filterwarnings("ignore:A Bunch will be returned") +@pytest.mark.parametrize("grid_resolution", [10, 20]) +def test_plot_partial_dependence(grid_resolution, pyplot, clf_diabetes, diabetes): + # Test partial dependence plot function. 
+ # Use columns 0 & 2 as 1 is not quantitative (sex) + feature_names = diabetes.feature_names + disp = PartialDependenceDisplay.from_estimator( + clf_diabetes, + diabetes.data, + [0, 2, (0, 2)], + grid_resolution=grid_resolution, + feature_names=feature_names, + contour_kw={"cmap": "jet"}, + ) + fig = pyplot.gcf() + axs = fig.get_axes() + assert disp.figure_ is fig + assert len(axs) == 4 + + assert disp.bounding_ax_ is not None + assert disp.axes_.shape == (1, 3) + assert disp.lines_.shape == (1, 3) + assert disp.contours_.shape == (1, 3) + assert disp.deciles_vlines_.shape == (1, 3) + assert disp.deciles_hlines_.shape == (1, 3) + + assert disp.lines_[0, 2] is None + assert disp.contours_[0, 0] is None + assert disp.contours_[0, 1] is None + + # deciles lines: always show on xaxis, only show on yaxis if 2-way PDP + for i in range(3): + assert disp.deciles_vlines_[0, i] is not None + assert disp.deciles_hlines_[0, 0] is None + assert disp.deciles_hlines_[0, 1] is None + assert disp.deciles_hlines_[0, 2] is not None + + assert disp.features == [(0,), (2,), (0, 2)] + assert np.all(disp.feature_names == feature_names) + assert len(disp.deciles) == 2 + for i in [0, 2]: + assert_allclose( + disp.deciles[i], + mquantiles(diabetes.data[:, i], prob=np.arange(0.1, 1.0, 0.1)), + ) + + single_feature_positions = [(0, (0, 0)), (2, (0, 1))] + expected_ylabels = ["Partial dependence", ""] + + for i, (feat_col, pos) in enumerate(single_feature_positions): + ax = disp.axes_[pos] + assert ax.get_ylabel() == expected_ylabels[i] + assert ax.get_xlabel() == diabetes.feature_names[feat_col] + + line = disp.lines_[pos] + + avg_preds = disp.pd_results[i] + assert avg_preds.average.shape == (1, grid_resolution) + target_idx = disp.target_idx + + line_data = line.get_data() + assert_allclose(line_data[0], avg_preds["grid_values"][0]) + assert_allclose(line_data[1], avg_preds.average[target_idx].ravel()) + + # two feature position + ax = disp.axes_[0, 2] + coutour = disp.contours_[0, 2] + 
assert coutour.get_cmap().name == "jet" + assert ax.get_xlabel() == diabetes.feature_names[0] + assert ax.get_ylabel() == diabetes.feature_names[2] + + +@pytest.mark.filterwarnings("ignore:A Bunch will be returned") +@pytest.mark.parametrize( + "kind, centered, subsample, shape", + [ + ("average", False, None, (1, 3)), + ("individual", False, None, (1, 3, 50)), + ("both", False, None, (1, 3, 51)), + ("individual", False, 20, (1, 3, 20)), + ("both", False, 20, (1, 3, 21)), + ("individual", False, 0.5, (1, 3, 25)), + ("both", False, 0.5, (1, 3, 26)), + ("average", True, None, (1, 3)), + ("individual", True, None, (1, 3, 50)), + ("both", True, None, (1, 3, 51)), + ("individual", True, 20, (1, 3, 20)), + ("both", True, 20, (1, 3, 21)), + ], +) +def test_plot_partial_dependence_kind( + pyplot, + kind, + centered, + subsample, + shape, + clf_diabetes, + diabetes, +): + disp = PartialDependenceDisplay.from_estimator( + clf_diabetes, + diabetes.data, + [0, 1, 2], + kind=kind, + centered=centered, + subsample=subsample, + ) + + assert disp.axes_.shape == (1, 3) + assert disp.lines_.shape == shape + assert disp.contours_.shape == (1, 3) + + assert disp.contours_[0, 0] is None + assert disp.contours_[0, 1] is None + assert disp.contours_[0, 2] is None + + if centered: + assert all([ln._y[0] == 0.0 for ln in disp.lines_.ravel() if ln is not None]) + else: + assert all([ln._y[0] != 0.0 for ln in disp.lines_.ravel() if ln is not None]) + + +@pytest.mark.filterwarnings("ignore:A Bunch will be returned") +@pytest.mark.parametrize( + "input_type, feature_names_type", + [ + ("dataframe", None), + ("dataframe", "list"), + ("list", "list"), + ("array", "list"), + ("dataframe", "array"), + ("list", "array"), + ("array", "array"), + ("dataframe", "series"), + ("list", "series"), + ("array", "series"), + ("dataframe", "index"), + ("list", "index"), + ("array", "index"), + ], +) +def test_plot_partial_dependence_str_features( + pyplot, + clf_diabetes, + diabetes, + input_type, + 
feature_names_type, +): + if input_type == "dataframe": + pd = pytest.importorskip("pandas") + X = pd.DataFrame(diabetes.data, columns=diabetes.feature_names) + elif input_type == "list": + X = diabetes.data.tolist() + else: + X = diabetes.data + + if feature_names_type is None: + feature_names = None + else: + feature_names = _convert_container(diabetes.feature_names, feature_names_type) + + grid_resolution = 25 + # check with str features and array feature names and single column + disp = PartialDependenceDisplay.from_estimator( + clf_diabetes, + X, + [("age", "bmi"), "bmi"], + grid_resolution=grid_resolution, + feature_names=feature_names, + n_cols=1, + line_kw={"alpha": 0.8}, + ) + fig = pyplot.gcf() + axs = fig.get_axes() + assert len(axs) == 3 + + assert disp.figure_ is fig + assert disp.axes_.shape == (2, 1) + assert disp.lines_.shape == (2, 1) + assert disp.contours_.shape == (2, 1) + assert disp.deciles_vlines_.shape == (2, 1) + assert disp.deciles_hlines_.shape == (2, 1) + + assert disp.lines_[0, 0] is None + assert disp.deciles_vlines_[0, 0] is not None + assert disp.deciles_hlines_[0, 0] is not None + assert disp.contours_[1, 0] is None + assert disp.deciles_hlines_[1, 0] is None + assert disp.deciles_vlines_[1, 0] is not None + + # line + ax = disp.axes_[1, 0] + assert ax.get_xlabel() == "bmi" + assert ax.get_ylabel() == "Partial dependence" + + line = disp.lines_[1, 0] + avg_preds = disp.pd_results[1] + target_idx = disp.target_idx + assert line.get_alpha() == 0.8 + + line_data = line.get_data() + assert_allclose(line_data[0], avg_preds["grid_values"][0]) + assert_allclose(line_data[1], avg_preds.average[target_idx].ravel()) + + # contour + ax = disp.axes_[0, 0] + assert ax.get_xlabel() == "age" + assert ax.get_ylabel() == "bmi" + + +@pytest.mark.filterwarnings("ignore:A Bunch will be returned") +def test_plot_partial_dependence_custom_axes(pyplot, clf_diabetes, diabetes): + grid_resolution = 25 + fig, (ax1, ax2) = pyplot.subplots(1, 2) + disp = 
PartialDependenceDisplay.from_estimator( + clf_diabetes, + diabetes.data, + ["age", ("age", "bmi")], + grid_resolution=grid_resolution, + feature_names=diabetes.feature_names, + ax=[ax1, ax2], + ) + assert fig is disp.figure_ + assert disp.bounding_ax_ is None + assert disp.axes_.shape == (2,) + assert disp.axes_[0] is ax1 + assert disp.axes_[1] is ax2 + + ax = disp.axes_[0] + assert ax.get_xlabel() == "age" + assert ax.get_ylabel() == "Partial dependence" + + line = disp.lines_[0] + avg_preds = disp.pd_results[0] + target_idx = disp.target_idx + + line_data = line.get_data() + assert_allclose(line_data[0], avg_preds["grid_values"][0]) + assert_allclose(line_data[1], avg_preds.average[target_idx].ravel()) + + # contour + ax = disp.axes_[1] + assert ax.get_xlabel() == "age" + assert ax.get_ylabel() == "bmi" + + +@pytest.mark.filterwarnings("ignore:A Bunch will be returned") +@pytest.mark.parametrize( + "kind, lines", [("average", 1), ("individual", 50), ("both", 51)] +) +def test_plot_partial_dependence_passing_numpy_axes( + pyplot, clf_diabetes, diabetes, kind, lines +): + grid_resolution = 25 + feature_names = diabetes.feature_names + disp1 = PartialDependenceDisplay.from_estimator( + clf_diabetes, + diabetes.data, + ["age", "bmi"], + kind=kind, + grid_resolution=grid_resolution, + feature_names=feature_names, + ) + assert disp1.axes_.shape == (1, 2) + assert disp1.axes_[0, 0].get_ylabel() == "Partial dependence" + assert disp1.axes_[0, 1].get_ylabel() == "" + assert len(disp1.axes_[0, 0].get_lines()) == lines + assert len(disp1.axes_[0, 1].get_lines()) == lines + + lr = LinearRegression() + lr.fit(diabetes.data, diabetes.target) + + disp2 = PartialDependenceDisplay.from_estimator( + lr, + diabetes.data, + ["age", "bmi"], + kind=kind, + grid_resolution=grid_resolution, + feature_names=feature_names, + ax=disp1.axes_, + ) + + assert np.all(disp1.axes_ == disp2.axes_) + assert len(disp2.axes_[0, 0].get_lines()) == 2 * lines + assert len(disp2.axes_[0, 
1].get_lines()) == 2 * lines + + +@pytest.mark.filterwarnings("ignore:A Bunch will be returned") +@pytest.mark.parametrize("nrows, ncols", [(2, 2), (3, 1)]) +def test_plot_partial_dependence_incorrent_num_axes( + pyplot, clf_diabetes, diabetes, nrows, ncols +): + grid_resolution = 5 + fig, axes = pyplot.subplots(nrows, ncols) + axes_formats = [list(axes.ravel()), tuple(axes.ravel()), axes] + + msg = "Expected ax to have 2 axes, got {}".format(nrows * ncols) + + disp = PartialDependenceDisplay.from_estimator( + clf_diabetes, + diabetes.data, + ["age", "bmi"], + grid_resolution=grid_resolution, + feature_names=diabetes.feature_names, + ) + + for ax_format in axes_formats: + with pytest.raises(ValueError, match=msg): + PartialDependenceDisplay.from_estimator( + clf_diabetes, + diabetes.data, + ["age", "bmi"], + grid_resolution=grid_resolution, + feature_names=diabetes.feature_names, + ax=ax_format, + ) + + # with axes object + with pytest.raises(ValueError, match=msg): + disp.plot(ax=ax_format) + + +@pytest.mark.filterwarnings("ignore:A Bunch will be returned") +def test_plot_partial_dependence_with_same_axes(pyplot, clf_diabetes, diabetes): + # The first call to plot_partial_dependence will create two new axes to + # place in the space of the passed in axes, which results in a total of + # three axes in the figure. + # Currently the API does not allow for the second call to + # plot_partial_dependence to use the same axes again, because it will + # create two new axes in the space resulting in five axes. To get the + # expected behavior one needs to pass the generated axes into the second + # call: + # disp1 = plot_partial_dependence(...) 
+ # disp2 = plot_partial_dependence(..., ax=disp1.axes_) + + grid_resolution = 25 + fig, ax = pyplot.subplots() + PartialDependenceDisplay.from_estimator( + clf_diabetes, + diabetes.data, + ["age", "bmi"], + grid_resolution=grid_resolution, + feature_names=diabetes.feature_names, + ax=ax, + ) + + msg = ( + "The ax was already used in another plot function, please set " + "ax=display.axes_ instead" + ) + + with pytest.raises(ValueError, match=msg): + PartialDependenceDisplay.from_estimator( + clf_diabetes, + diabetes.data, + ["age", "bmi"], + grid_resolution=grid_resolution, + feature_names=diabetes.feature_names, + ax=ax, + ) + + +@pytest.mark.filterwarnings("ignore:A Bunch will be returned") +def test_plot_partial_dependence_feature_name_reuse(pyplot, clf_diabetes, diabetes): + # second call to plot does not change the feature names from the first + # call + + feature_names = diabetes.feature_names + disp = PartialDependenceDisplay.from_estimator( + clf_diabetes, + diabetes.data, + [0, 1], + grid_resolution=10, + feature_names=feature_names, + ) + + PartialDependenceDisplay.from_estimator( + clf_diabetes, diabetes.data, [0, 1], grid_resolution=10, ax=disp.axes_ + ) + + for i, ax in enumerate(disp.axes_.ravel()): + assert ax.get_xlabel() == feature_names[i] + + +@pytest.mark.filterwarnings("ignore:A Bunch will be returned") +def test_plot_partial_dependence_multiclass(pyplot): + grid_resolution = 25 + clf_int = GradientBoostingClassifier(n_estimators=10, random_state=1) + iris = load_iris() + + # Test partial dependence plot function on multi-class input. 
+ clf_int.fit(iris.data, iris.target) + disp_target_0 = PartialDependenceDisplay.from_estimator( + clf_int, iris.data, [0, 3], target=0, grid_resolution=grid_resolution + ) + assert disp_target_0.figure_ is pyplot.gcf() + assert disp_target_0.axes_.shape == (1, 2) + assert disp_target_0.lines_.shape == (1, 2) + assert disp_target_0.contours_.shape == (1, 2) + assert disp_target_0.deciles_vlines_.shape == (1, 2) + assert disp_target_0.deciles_hlines_.shape == (1, 2) + assert all(c is None for c in disp_target_0.contours_.flat) + assert disp_target_0.target_idx == 0 + + # now with symbol labels + target = iris.target_names[iris.target] + clf_symbol = GradientBoostingClassifier(n_estimators=10, random_state=1) + clf_symbol.fit(iris.data, target) + disp_symbol = PartialDependenceDisplay.from_estimator( + clf_symbol, iris.data, [0, 3], target="setosa", grid_resolution=grid_resolution + ) + assert disp_symbol.figure_ is pyplot.gcf() + assert disp_symbol.axes_.shape == (1, 2) + assert disp_symbol.lines_.shape == (1, 2) + assert disp_symbol.contours_.shape == (1, 2) + assert disp_symbol.deciles_vlines_.shape == (1, 2) + assert disp_symbol.deciles_hlines_.shape == (1, 2) + assert all(c is None for c in disp_symbol.contours_.flat) + assert disp_symbol.target_idx == 0 + + for int_result, symbol_result in zip( + disp_target_0.pd_results, disp_symbol.pd_results + ): + assert_allclose(int_result.average, symbol_result.average) + assert_allclose(int_result["grid_values"], symbol_result["grid_values"]) + + # check that the pd plots are different for another target + disp_target_1 = PartialDependenceDisplay.from_estimator( + clf_int, iris.data, [0, 3], target=1, grid_resolution=grid_resolution + ) + target_0_data_y = disp_target_0.lines_[0, 0].get_data()[1] + target_1_data_y = disp_target_1.lines_[0, 0].get_data()[1] + assert any(target_0_data_y != target_1_data_y) + + +multioutput_regression_data = make_regression(n_samples=50, n_targets=2, random_state=0) + + 
+@pytest.mark.filterwarnings("ignore:A Bunch will be returned") +@pytest.mark.parametrize("target", [0, 1]) +def test_plot_partial_dependence_multioutput(pyplot, target): + # Test partial dependence plot function on multi-output input. + X, y = multioutput_regression_data + clf = LinearRegression().fit(X, y) + + grid_resolution = 25 + disp = PartialDependenceDisplay.from_estimator( + clf, X, [0, 1], target=target, grid_resolution=grid_resolution + ) + fig = pyplot.gcf() + axs = fig.get_axes() + assert len(axs) == 3 + assert disp.target_idx == target + assert disp.bounding_ax_ is not None + + positions = [(0, 0), (0, 1)] + expected_label = ["Partial dependence", ""] + + for i, pos in enumerate(positions): + ax = disp.axes_[pos] + assert ax.get_ylabel() == expected_label[i] + assert ax.get_xlabel() == f"x{i}" + + +@pytest.mark.filterwarnings("ignore:A Bunch will be returned") +def test_plot_partial_dependence_dataframe(pyplot, clf_diabetes, diabetes): + pd = pytest.importorskip("pandas") + df = pd.DataFrame(diabetes.data, columns=diabetes.feature_names) + + grid_resolution = 25 + + PartialDependenceDisplay.from_estimator( + clf_diabetes, + df, + ["bp", "s1"], + grid_resolution=grid_resolution, + feature_names=df.columns.tolist(), + ) + + +dummy_classification_data = make_classification(random_state=0) + + +@pytest.mark.filterwarnings("ignore:A Bunch will be returned") +@pytest.mark.parametrize( + "data, params, err_msg", + [ + ( + multioutput_regression_data, + {"target": None, "features": [0]}, + "target must be specified for multi-output", + ), + ( + multioutput_regression_data, + {"target": -1, "features": [0]}, + r"target must be in \[0, n_tasks\]", + ), + ( + multioutput_regression_data, + {"target": 100, "features": [0]}, + r"target must be in \[0, n_tasks\]", + ), + ( + dummy_classification_data, + {"features": ["foobar"], "feature_names": None}, + "Feature 'foobar' not in feature_names", + ), + ( + dummy_classification_data, + {"features": ["foobar"], 
"feature_names": ["abcd", "def"]}, + "Feature 'foobar' not in feature_names", + ), + ( + dummy_classification_data, + {"features": [(1, 2, 3)]}, + "Each entry in features must be either an int, ", + ), + ( + dummy_classification_data, + {"features": [1, {}]}, + "Each entry in features must be either an int, ", + ), + ( + dummy_classification_data, + {"features": [tuple()]}, + "Each entry in features must be either an int, ", + ), + ( + dummy_classification_data, + {"features": [123], "feature_names": ["blahblah"]}, + "All entries of features must be less than ", + ), + ( + dummy_classification_data, + {"features": [0, 1, 2], "feature_names": ["a", "b", "a"]}, + "feature_names should not contain duplicates", + ), + ( + dummy_classification_data, + {"features": [1, 2], "kind": ["both"]}, + "When `kind` is provided as a list of strings, it should contain", + ), + ( + dummy_classification_data, + {"features": [1], "subsample": -1}, + "When an integer, subsample=-1 should be positive.", + ), + ( + dummy_classification_data, + {"features": [1], "subsample": 1.2}, + r"When a floating-point, subsample=1.2 should be in the \(0, 1\) range", + ), + ( + dummy_classification_data, + {"features": [1, 2], "categorical_features": [1.0, 2.0]}, + "Expected `categorical_features` to be an array-like of boolean,", + ), + ( + dummy_classification_data, + {"features": [(1, 2)], "categorical_features": [2]}, + "Two-way partial dependence plots are not supported for pairs", + ), + ( + dummy_classification_data, + {"features": [1], "categorical_features": [1], "kind": "individual"}, + "It is not possible to display individual effects", + ), + ], +) +def test_plot_partial_dependence_error(pyplot, data, params, err_msg): + X, y = data + estimator = LinearRegression().fit(X, y) + + with pytest.raises(ValueError, match=err_msg): + PartialDependenceDisplay.from_estimator(estimator, X, **params) + + +@pytest.mark.filterwarnings("ignore:A Bunch will be returned") +@pytest.mark.parametrize( + 
"params, err_msg", + [ + ({"target": 4, "features": [0]}, "target not in est.classes_, got 4"), + ({"target": None, "features": [0]}, "target must be specified for multi-class"), + ( + {"target": 1, "features": [4.5]}, + "Each entry in features must be either an int,", + ), + ], +) +def test_plot_partial_dependence_multiclass_error(pyplot, params, err_msg): + iris = load_iris() + clf = GradientBoostingClassifier(n_estimators=10, random_state=1) + clf.fit(iris.data, iris.target) + + with pytest.raises(ValueError, match=err_msg): + PartialDependenceDisplay.from_estimator(clf, iris.data, **params) + + +def test_plot_partial_dependence_does_not_override_ylabel( + pyplot, clf_diabetes, diabetes +): + # Non-regression test to be sure to not override the ylabel if it has been + # See https://github.com/scikit-learn/scikit-learn/issues/15772 + _, axes = pyplot.subplots(1, 2) + axes[0].set_ylabel("Hello world") + PartialDependenceDisplay.from_estimator( + clf_diabetes, diabetes.data, [0, 1], ax=axes + ) + + assert axes[0].get_ylabel() == "Hello world" + assert axes[1].get_ylabel() == "Partial dependence" + + +@pytest.mark.parametrize( + "categorical_features, array_type", + [ + (["col_A", "col_C"], "dataframe"), + ([0, 2], "array"), + ([True, False, True], "array"), + ], +) +def test_plot_partial_dependence_with_categorical( + pyplot, categorical_features, array_type +): + X = [[1, 1, "A"], [2, 0, "C"], [3, 2, "B"]] + column_name = ["col_A", "col_B", "col_C"] + X = _convert_container(X, array_type, columns_name=column_name) + y = np.array([1.2, 0.5, 0.45]).T + + preprocessor = make_column_transformer((OneHotEncoder(), categorical_features)) + model = make_pipeline(preprocessor, LinearRegression()) + model.fit(X, y) + + # single feature + disp = PartialDependenceDisplay.from_estimator( + model, + X, + features=["col_C"], + feature_names=column_name, + categorical_features=categorical_features, + ) + + assert disp.figure_ is pyplot.gcf() + assert disp.bars_.shape == (1, 1) + 
assert disp.bars_[0][0] is not None + assert disp.lines_.shape == (1, 1) + assert disp.lines_[0][0] is None + assert disp.contours_.shape == (1, 1) + assert disp.contours_[0][0] is None + assert disp.deciles_vlines_.shape == (1, 1) + assert disp.deciles_vlines_[0][0] is None + assert disp.deciles_hlines_.shape == (1, 1) + assert disp.deciles_hlines_[0][0] is None + assert disp.axes_[0, 0].get_legend() is None + + # interaction between two features + disp = PartialDependenceDisplay.from_estimator( + model, + X, + features=[("col_A", "col_C")], + feature_names=column_name, + categorical_features=categorical_features, + ) + + assert disp.figure_ is pyplot.gcf() + assert disp.bars_.shape == (1, 1) + assert disp.bars_[0][0] is None + assert disp.lines_.shape == (1, 1) + assert disp.lines_[0][0] is None + assert disp.contours_.shape == (1, 1) + assert disp.contours_[0][0] is None + assert disp.deciles_vlines_.shape == (1, 1) + assert disp.deciles_vlines_[0][0] is None + assert disp.deciles_hlines_.shape == (1, 1) + assert disp.deciles_hlines_[0][0] is None + assert disp.axes_[0, 0].get_legend() is None + + +def test_plot_partial_dependence_legend(pyplot): + pd = pytest.importorskip("pandas") + X = pd.DataFrame( + { + "col_A": ["A", "B", "C"], + "col_B": [1, 0, 2], + "col_C": ["C", "B", "A"], + } + ) + y = np.array([1.2, 0.5, 0.45]).T + + categorical_features = ["col_A", "col_C"] + preprocessor = make_column_transformer((OneHotEncoder(), categorical_features)) + model = make_pipeline(preprocessor, LinearRegression()) + model.fit(X, y) + + disp = PartialDependenceDisplay.from_estimator( + model, + X, + features=["col_B", "col_C"], + categorical_features=categorical_features, + kind=["both", "average"], + ) + + legend_text = disp.axes_[0, 0].get_legend().get_texts() + assert len(legend_text) == 1 + assert legend_text[0].get_text() == "average" + assert disp.axes_[0, 1].get_legend() is None + + +@pytest.mark.parametrize( + "kind, expected_shape", + [("average", (1, 2)), 
("individual", (1, 2, 20)), ("both", (1, 2, 21))], +) +def test_plot_partial_dependence_subsampling( + pyplot, clf_diabetes, diabetes, kind, expected_shape +): + # check that the subsampling is properly working + # non-regression test for: + # https://github.com/scikit-learn/scikit-learn/pull/18359 + matplotlib = pytest.importorskip("matplotlib") + grid_resolution = 25 + feature_names = diabetes.feature_names + + disp1 = PartialDependenceDisplay.from_estimator( + clf_diabetes, + diabetes.data, + ["age", "bmi"], + kind=kind, + grid_resolution=grid_resolution, + feature_names=feature_names, + subsample=20, + random_state=0, + ) + + assert disp1.lines_.shape == expected_shape + assert all( + [isinstance(line, matplotlib.lines.Line2D) for line in disp1.lines_.ravel()] + ) + + +@pytest.mark.parametrize( + "kind, line_kw, label", + [ + ("individual", {}, None), + ("individual", {"label": "xxx"}, None), + ("average", {}, None), + ("average", {"label": "xxx"}, "xxx"), + ("both", {}, "average"), + ("both", {"label": "xxx"}, "xxx"), + ], +) +def test_partial_dependence_overwrite_labels( + pyplot, + clf_diabetes, + diabetes, + kind, + line_kw, + label, +): + """Test that make sure that we can overwrite the label of the PDP plot""" + disp = PartialDependenceDisplay.from_estimator( + clf_diabetes, + diabetes.data, + [0, 2], + grid_resolution=25, + feature_names=diabetes.feature_names, + kind=kind, + line_kw=line_kw, + ) + + for ax in disp.axes_.ravel(): + if label is None: + assert ax.get_legend() is None + else: + legend_text = ax.get_legend().get_texts() + assert len(legend_text) == 1 + assert legend_text[0].get_text() == label + + +@pytest.mark.parametrize( + "categorical_features, array_type", + [ + (["col_A", "col_C"], "dataframe"), + ([0, 2], "array"), + ([True, False, True], "array"), + ], +) +def test_grid_resolution_with_categorical(pyplot, categorical_features, array_type): + """Check that we raise a ValueError when the grid_resolution is too small + respect to the 
number of categories in the categorical features targeted. + """ + X = [["A", 1, "A"], ["B", 0, "C"], ["C", 2, "B"]] + column_name = ["col_A", "col_B", "col_C"] + X = _convert_container(X, array_type, columns_name=column_name) + y = np.array([1.2, 0.5, 0.45]).T + + preprocessor = make_column_transformer((OneHotEncoder(), categorical_features)) + model = make_pipeline(preprocessor, LinearRegression()) + model.fit(X, y) + + err_msg = ( + "resolution of the computed grid is less than the minimum number of categories" + ) + with pytest.raises(ValueError, match=err_msg): + PartialDependenceDisplay.from_estimator( + model, + X, + features=["col_C"], + feature_names=column_name, + categorical_features=categorical_features, + grid_resolution=2, + ) + + +@pytest.mark.parametrize("kind", ["individual", "average", "both"]) +@pytest.mark.parametrize("centered", [True, False]) +def test_partial_dependence_plot_limits_one_way( + pyplot, clf_diabetes, diabetes, kind, centered +): + """Check that the PD limit on the plots are properly set on one-way plots.""" + disp = PartialDependenceDisplay.from_estimator( + clf_diabetes, + diabetes.data, + features=(0, 1), + kind=kind, + grid_resolution=25, + feature_names=diabetes.feature_names, + ) + + range_pd = np.array([-1, 1], dtype=np.float64) + for pd in disp.pd_results: + if "average" in pd: + pd["average"][...] = range_pd[1] + pd["average"][0, 0] = range_pd[0] + if "individual" in pd: + pd["individual"][...] 
= range_pd[1] + pd["individual"][0, 0, 0] = range_pd[0] + + disp.plot(centered=centered) + # check that we anchor to zero x-axis when centering + y_lim = range_pd - range_pd[0] if centered else range_pd + padding = 0.05 * (y_lim[1] - y_lim[0]) + y_lim[0] -= padding + y_lim[1] += padding + for ax in disp.axes_.ravel(): + assert_allclose(ax.get_ylim(), y_lim) + + +@pytest.mark.parametrize("centered", [True, False]) +def test_partial_dependence_plot_limits_two_way( + pyplot, clf_diabetes, diabetes, centered +): + """Check that the PD limit on the plots are properly set on two-way plots.""" + disp = PartialDependenceDisplay.from_estimator( + clf_diabetes, + diabetes.data, + features=[(0, 1)], + kind="average", + grid_resolution=25, + feature_names=diabetes.feature_names, + ) + + range_pd = np.array([-1, 1], dtype=np.float64) + for pd in disp.pd_results: + pd["average"][...] = range_pd[1] + pd["average"][0, 0] = range_pd[0] + + disp.plot(centered=centered) + contours = disp.contours_[0, 0] + levels = range_pd - range_pd[0] if centered else range_pd + + padding = 0.05 * (levels[1] - levels[0]) + levels[0] -= padding + levels[1] += padding + expect_levels = np.linspace(*levels, num=8) + assert_allclose(contours.levels, expect_levels) + + +def test_partial_dependence_kind_list( + pyplot, + clf_diabetes, + diabetes, +): + """Check that we can provide a list of strings to kind parameter.""" + matplotlib = pytest.importorskip("matplotlib") + + disp = PartialDependenceDisplay.from_estimator( + clf_diabetes, + diabetes.data, + features=[0, 2, (1, 2)], + grid_resolution=20, + kind=["both", "both", "average"], + ) + + for idx in [0, 1]: + assert all( + [ + isinstance(line, matplotlib.lines.Line2D) + for line in disp.lines_[0, idx].ravel() + ] + ) + assert disp.contours_[0, idx] is None + + assert disp.contours_[0, 2] is not None + assert all([line is None for line in disp.lines_[0, 2].ravel()]) + + +@pytest.mark.parametrize( + "features, kind", + [ + ([0, 2, (1, 2)], 
"individual"), + ([0, 2, (1, 2)], "both"), + ([(0, 1), (0, 2), (1, 2)], "individual"), + ([(0, 1), (0, 2), (1, 2)], "both"), + ([0, 2, (1, 2)], ["individual", "individual", "individual"]), + ([0, 2, (1, 2)], ["both", "both", "both"]), + ], +) +def test_partial_dependence_kind_error( + pyplot, + clf_diabetes, + diabetes, + features, + kind, +): + """Check that we raise an informative error when 2-way PD is requested + together with 1-way PD/ICE""" + warn_msg = ( + "ICE plot cannot be rendered for 2-way feature interactions. 2-way " + "feature interactions mandates PD plots using the 'average' kind" + ) + with pytest.raises(ValueError, match=warn_msg): + PartialDependenceDisplay.from_estimator( + clf_diabetes, + diabetes.data, + features=features, + grid_resolution=20, + kind=kind, + ) + + +@pytest.mark.filterwarnings("ignore:A Bunch will be returned") +@pytest.mark.parametrize( + "line_kw, pd_line_kw, ice_lines_kw, expected_colors", + [ + ({"color": "r"}, {"color": "g"}, {"color": "b"}, ("g", "b")), + (None, {"color": "g"}, {"color": "b"}, ("g", "b")), + ({"color": "r"}, None, {"color": "b"}, ("r", "b")), + ({"color": "r"}, {"color": "g"}, None, ("g", "r")), + ({"color": "r"}, None, None, ("r", "r")), + ({"color": "r"}, {"linestyle": "--"}, {"linestyle": "-."}, ("r", "r")), + ], +) +def test_plot_partial_dependence_lines_kw( + pyplot, + clf_diabetes, + diabetes, + line_kw, + pd_line_kw, + ice_lines_kw, + expected_colors, +): + """Check that passing `pd_line_kw` and `ice_lines_kw` will act on the + specific lines in the plot. 
+ """ + + disp = PartialDependenceDisplay.from_estimator( + clf_diabetes, + diabetes.data, + [0, 2], + grid_resolution=20, + feature_names=diabetes.feature_names, + n_cols=2, + kind="both", + line_kw=line_kw, + pd_line_kw=pd_line_kw, + ice_lines_kw=ice_lines_kw, + ) + + line = disp.lines_[0, 0, -1] + assert line.get_color() == expected_colors[0] + if pd_line_kw is not None and "linestyle" in pd_line_kw: + assert line.get_linestyle() == pd_line_kw["linestyle"] + else: + assert line.get_linestyle() == "--" + + line = disp.lines_[0, 0, 0] + assert line.get_color() == expected_colors[1] + if ice_lines_kw is not None and "linestyle" in ice_lines_kw: + assert line.get_linestyle() == ice_lines_kw["linestyle"] + else: + assert line.get_linestyle() == "-" + + +def test_partial_dependence_display_wrong_len_kind( + pyplot, + clf_diabetes, + diabetes, +): + """Check that we raise an error when `kind` is a list with a wrong length. + + This case can only be triggered using the `PartialDependenceDisplay.from_estimator` + method. + """ + disp = PartialDependenceDisplay.from_estimator( + clf_diabetes, + diabetes.data, + features=[0, 2], + grid_resolution=20, + kind="average", # len(kind) != len(features) + ) + + # alter `kind` to be a list with a length different from length of `features` + disp.kind = ["average"] + err_msg = ( + r"When `kind` is provided as a list of strings, it should contain as many" + r" elements as `features`. `kind` contains 1 element\(s\) and `features`" + r" contains 2 element\(s\)." 
+ ) + with pytest.raises(ValueError, match=err_msg): + disp.plot() + + +@pytest.mark.parametrize( + "kind", + ["individual", "both", "average", ["average", "both"], ["individual", "both"]], +) +def test_partial_dependence_display_kind_centered_interaction( + pyplot, + kind, + clf_diabetes, + diabetes, +): + """Check that we properly center ICE and PD when passing kind as a string and as a + list.""" + disp = PartialDependenceDisplay.from_estimator( + clf_diabetes, + diabetes.data, + [0, 1], + kind=kind, + centered=True, + subsample=5, + ) + + assert all([ln._y[0] == 0.0 for ln in disp.lines_.ravel() if ln is not None]) + + +def test_partial_dependence_display_with_constant_sample_weight( + pyplot, + clf_diabetes, + diabetes, +): + """Check that the utilization of a constant sample weight maintains the + standard behavior. + """ + disp = PartialDependenceDisplay.from_estimator( + clf_diabetes, + diabetes.data, + [0, 1], + kind="average", + method="brute", + ) + + sample_weight = np.ones_like(diabetes.target) + disp_sw = PartialDependenceDisplay.from_estimator( + clf_diabetes, + diabetes.data, + [0, 1], + sample_weight=sample_weight, + kind="average", + method="brute", + ) + + assert np.array_equal( + disp.pd_results[0]["average"], disp_sw.pd_results[0]["average"] + ) + + +def test_subclass_named_constructors_return_type_is_subclass( + pyplot, diabetes, clf_diabetes +): + """Check that named constructors return the correct type when subclassed. + + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/pull/27675 + """ + + class SubclassOfDisplay(PartialDependenceDisplay): + pass + + curve = SubclassOfDisplay.from_estimator( + clf_diabetes, + diabetes.data, + [0, 2, (0, 2)], + ) + + assert isinstance(curve, SubclassOfDisplay)