Add files using upload-large-folder tool
This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full change set.
- env-llmeval/lib/python3.10/site-packages/sklearn/_loss/__init__.py +30 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/_loss/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/_loss/__pycache__/link.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/_loss/__pycache__/loss.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/_loss/_loss.pxd +91 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/_loss/link.py +280 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/_loss/loss.py +1177 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/_loss/tests/__init__.py +0 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/_loss/tests/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/_loss/tests/__pycache__/test_link.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/_loss/tests/__pycache__/test_loss.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/_loss/tests/test_link.py +111 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/_loss/tests/test_loss.py +1320 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/compose/__init__.py +20 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/compose/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/compose/__pycache__/_column_transformer.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/compose/__pycache__/_target.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/compose/_column_transformer.py +1463 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/compose/_target.py +342 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/compose/tests/__init__.py +0 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/compose/tests/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/compose/tests/__pycache__/test_column_transformer.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/compose/tests/__pycache__/test_target.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/compose/tests/test_column_transformer.py +2582 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/compose/tests/test_target.py +387 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/datasets/data/diabetes_data_raw.csv.gz +3 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/datasets/data/diabetes_target.csv.gz +3 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/datasets/data/digits.csv.gz +3 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/datasets/images/china.jpg +3 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/datasets/images/flower.jpg +3 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1590/api-v1-jd-1590.json.gz +3 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1590/api-v1-jdf-1590.json.gz +3 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1590/api-v1-jdq-1590.json.gz +3 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1590/data-v1-dl-1595261.arff.gz +3 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_292/api-v1-jd-40981.json.gz +3 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40589/api-v1-jdl-dn-emotions-l-2-s-act-.json.gz +3 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40945/api-v1-jdf-40945.json.gz +3 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_42074/api-v1-jd-42074.json.gz +3 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_42074/api-v1-jdf-42074.json.gz +3 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_42074/api-v1-jdq-42074.json.gz +3 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_42074/data-v1-dl-21552912.arff.gz +3 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_561/api-v1-jd-561.json.gz +3 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_561/api-v1-jdf-561.json.gz +3 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_561/api-v1-jdl-dn-cpu-l-2-dv-1.json.gz +3 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_561/api-v1-jdl-dn-cpu-l-2-s-act-.json.gz +3 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_561/api-v1-jdq-561.json.gz +3 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_561/data-v1-dl-52739.arff.gz +3 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_base.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_dict_learning.cpython-310.pyc +0 -0
env-llmeval/lib/python3.10/site-packages/sklearn/_loss/__init__.py
ADDED
@@ -0,0 +1,30 @@
"""
The :mod:`sklearn._loss` module includes loss function classes suitable for
fitting classification and regression tasks.
"""

from .loss import (
    AbsoluteError,
    HalfBinomialLoss,
    HalfGammaLoss,
    HalfMultinomialLoss,
    HalfPoissonLoss,
    HalfSquaredError,
    HalfTweedieLoss,
    HalfTweedieLossIdentity,
    HuberLoss,
    PinballLoss,
)

__all__ = [
    "HalfSquaredError",
    "AbsoluteError",
    "PinballLoss",
    "HuberLoss",
    "HalfPoissonLoss",
    "HalfGammaLoss",
    "HalfTweedieLoss",
    "HalfTweedieLossIdentity",
    "HalfBinomialLoss",
    "HalfMultinomialLoss",
]
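For orientation, a minimal usage sketch of the classes this module re-exports. This is illustrative only and not part of the commit's files: `sklearn._loss` is a private module, so the import path and behavior may change between releases.

    import numpy as np
    from sklearn._loss import HalfSquaredError, PinballLoss

    # Usage sketch (private API, not part of this commit).
    y_true = np.array([0.5, 1.0, 2.0])
    raw_prediction = np.array([0.4, 1.1, 1.5])

    sq = HalfSquaredError()
    print(sq.loss(y_true=y_true, raw_prediction=raw_prediction))  # per-sample 0.5 * residual**2
    print(sq(y_true=y_true, raw_prediction=raw_prediction))       # weighted average loss

    pin = PinballLoss(quantile=0.9)
    print(pin(y_true=y_true, raw_prediction=raw_prediction))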
env-llmeval/lib/python3.10/site-packages/sklearn/_loss/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (666 Bytes).

env-llmeval/lib/python3.10/site-packages/sklearn/_loss/__pycache__/link.cpython-310.pyc
ADDED
Binary file (8.96 kB).

env-llmeval/lib/python3.10/site-packages/sklearn/_loss/__pycache__/loss.cpython-310.pyc
ADDED
Binary file (33.9 kB).
env-llmeval/lib/python3.10/site-packages/sklearn/_loss/_loss.pxd
ADDED
@@ -0,0 +1,91 @@
# Fused types for input like y_true, raw_prediction, sample_weights.
ctypedef fused floating_in:
    double
    float


# Fused types for output like gradient and hessian
# We use different fused types for input (floating_in) and output (floating_out), such
# that input and output can have different dtypes in the same function call. A single
# fused type can only take on one single value (type) for all arguments in one function
# call.
ctypedef fused floating_out:
    double
    float


# Struct to return 2 doubles
ctypedef struct double_pair:
    double val1
    double val2


# C base class for loss functions
cdef class CyLossFunction:
    cdef double cy_loss(self, double y_true, double raw_prediction) noexcept nogil
    cdef double cy_gradient(self, double y_true, double raw_prediction) noexcept nogil
    cdef double_pair cy_grad_hess(self, double y_true, double raw_prediction) noexcept nogil


cdef class CyHalfSquaredError(CyLossFunction):
    cdef double cy_loss(self, double y_true, double raw_prediction) noexcept nogil
    cdef double cy_gradient(self, double y_true, double raw_prediction) noexcept nogil
    cdef double_pair cy_grad_hess(self, double y_true, double raw_prediction) noexcept nogil


cdef class CyAbsoluteError(CyLossFunction):
    cdef double cy_loss(self, double y_true, double raw_prediction) noexcept nogil
    cdef double cy_gradient(self, double y_true, double raw_prediction) noexcept nogil
    cdef double_pair cy_grad_hess(self, double y_true, double raw_prediction) noexcept nogil


cdef class CyPinballLoss(CyLossFunction):
    cdef readonly double quantile  # readonly makes it accessible from Python
    cdef double cy_loss(self, double y_true, double raw_prediction) noexcept nogil
    cdef double cy_gradient(self, double y_true, double raw_prediction) noexcept nogil
    cdef double_pair cy_grad_hess(self, double y_true, double raw_prediction) noexcept nogil


cdef class CyHuberLoss(CyLossFunction):
    cdef public double delta  # public makes it accessible from Python
    cdef double cy_loss(self, double y_true, double raw_prediction) noexcept nogil
    cdef double cy_gradient(self, double y_true, double raw_prediction) noexcept nogil
    cdef double_pair cy_grad_hess(self, double y_true, double raw_prediction) noexcept nogil


cdef class CyHalfPoissonLoss(CyLossFunction):
    cdef double cy_loss(self, double y_true, double raw_prediction) noexcept nogil
    cdef double cy_gradient(self, double y_true, double raw_prediction) noexcept nogil
    cdef double_pair cy_grad_hess(self, double y_true, double raw_prediction) noexcept nogil


cdef class CyHalfGammaLoss(CyLossFunction):
    cdef double cy_loss(self, double y_true, double raw_prediction) noexcept nogil
    cdef double cy_gradient(self, double y_true, double raw_prediction) noexcept nogil
    cdef double_pair cy_grad_hess(self, double y_true, double raw_prediction) noexcept nogil


cdef class CyHalfTweedieLoss(CyLossFunction):
    cdef readonly double power  # readonly makes it accessible from Python
    cdef double cy_loss(self, double y_true, double raw_prediction) noexcept nogil
    cdef double cy_gradient(self, double y_true, double raw_prediction) noexcept nogil
    cdef double_pair cy_grad_hess(self, double y_true, double raw_prediction) noexcept nogil


cdef class CyHalfTweedieLossIdentity(CyLossFunction):
    cdef readonly double power  # readonly makes it accessible from Python
    cdef double cy_loss(self, double y_true, double raw_prediction) noexcept nogil
    cdef double cy_gradient(self, double y_true, double raw_prediction) noexcept nogil
    cdef double_pair cy_grad_hess(self, double y_true, double raw_prediction) noexcept nogil


cdef class CyHalfBinomialLoss(CyLossFunction):
    cdef double cy_loss(self, double y_true, double raw_prediction) noexcept nogil
    cdef double cy_gradient(self, double y_true, double raw_prediction) noexcept nogil
    cdef double_pair cy_grad_hess(self, double y_true, double raw_prediction) noexcept nogil


cdef class CyExponentialLoss(CyLossFunction):
    cdef double cy_loss(self, double y_true, double raw_prediction) noexcept nogil
    cdef double cy_gradient(self, double y_true, double raw_prediction) noexcept nogil
    cdef double_pair cy_grad_hess(self, double y_true, double raw_prediction) noexcept nogil
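To make the declarations concrete, here is a pure-Python sketch of what the three per-sample methods compute for `CyHalfSquaredError`. This is hypothetical mirror code, not the actual Cython implementation; the closed forms follow directly from the half squared error definition, loss = 0.5 * (raw_prediction - y_true)**2.

    # Pure-Python sketch of the Cython method contracts (illustrative only).
    def cy_loss(y_true: float, raw_prediction: float) -> float:
        # loss = 0.5 * (raw_prediction - y_true)**2
        return 0.5 * (raw_prediction - y_true) ** 2

    def cy_gradient(y_true: float, raw_prediction: float) -> float:
        # d loss / d raw_prediction
        return raw_prediction - y_true

    def cy_grad_hess(y_true: float, raw_prediction: float) -> tuple[float, float]:
        # Mirrors the double_pair struct: (val1=gradient, val2=hessian).
        # The hessian of half squared error is identically 1.
        return raw_prediction - y_true, 1.0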
env-llmeval/lib/python3.10/site-packages/sklearn/_loss/link.py
ADDED
@@ -0,0 +1,280 @@
"""
Module contains classes for invertible (and differentiable) link functions.
"""
# Author: Christian Lorentzen <[email protected]>

from abc import ABC, abstractmethod
from dataclasses import dataclass

import numpy as np
from scipy.special import expit, logit
from scipy.stats import gmean

from ..utils.extmath import softmax


@dataclass
class Interval:
    low: float
    high: float
    low_inclusive: bool
    high_inclusive: bool

    def __post_init__(self):
        """Check that low <= high"""
        if self.low > self.high:
            raise ValueError(
                f"One must have low <= high; got low={self.low}, high={self.high}."
            )

    def includes(self, x):
        """Test whether all values of x are in interval range.

        Parameters
        ----------
        x : ndarray
            Array whose elements are tested to be in interval range.

        Returns
        -------
        result : bool
        """
        if self.low_inclusive:
            low = np.greater_equal(x, self.low)
        else:
            low = np.greater(x, self.low)

        if not np.all(low):
            return False

        if self.high_inclusive:
            high = np.less_equal(x, self.high)
        else:
            high = np.less(x, self.high)

        # Note: np.all returns numpy.bool_
        return bool(np.all(high))


def _inclusive_low_high(interval, dtype=np.float64):
    """Generate values low and high to be within the interval range.

    This is used in tests only.

    Returns
    -------
    low, high : tuple
        The returned values low and high lie within the interval.
    """
    eps = 10 * np.finfo(dtype).eps
    if interval.low == -np.inf:
        low = -1e10
    elif interval.low < 0:
        low = interval.low * (1 - eps) + eps
    else:
        low = interval.low * (1 + eps) + eps

    if interval.high == np.inf:
        high = 1e10
    elif interval.high < 0:
        high = interval.high * (1 + eps) - eps
    else:
        high = interval.high * (1 - eps) - eps

    return low, high


class BaseLink(ABC):
    """Abstract base class for differentiable, invertible link functions.

    Convention:
        - link function g: raw_prediction = g(y_pred)
        - inverse link h: y_pred = h(raw_prediction)

    For (generalized) linear models, `raw_prediction = X @ coef` is the so
    called linear predictor, and `y_pred = h(raw_prediction)` is the predicted
    conditional (on X) expected value of the target `y_true`.

    The methods are not implemented as staticmethods in case a link function needs
    parameters.
    """

    is_multiclass = False  # used for testing only

    # Usually, raw_prediction may be any real number and y_pred is an open
    # interval.
    # interval_raw_prediction = Interval(-np.inf, np.inf, False, False)
    interval_y_pred = Interval(-np.inf, np.inf, False, False)

    @abstractmethod
    def link(self, y_pred, out=None):
        """Compute the link function g(y_pred).

        The link function maps (predicted) target values to raw predictions,
        i.e. `g(y_pred) = raw_prediction`.

        Parameters
        ----------
        y_pred : array
            Predicted target values.
        out : array
            A location into which the result is stored. If provided, it must
            have a shape that the inputs broadcast to. If not provided or None,
            a freshly-allocated array is returned.

        Returns
        -------
        out : array
            Output array, element-wise link function.
        """

    @abstractmethod
    def inverse(self, raw_prediction, out=None):
        """Compute the inverse link function h(raw_prediction).

        The inverse link function maps raw predictions to predicted target
        values, i.e. `h(raw_prediction) = y_pred`.

        Parameters
        ----------
        raw_prediction : array
            Raw prediction values (in link space).
        out : array
            A location into which the result is stored. If provided, it must
            have a shape that the inputs broadcast to. If not provided or None,
            a freshly-allocated array is returned.

        Returns
        -------
        out : array
            Output array, element-wise inverse link function.
        """


class IdentityLink(BaseLink):
    """The identity link function g(x)=x."""

    def link(self, y_pred, out=None):
        if out is not None:
            np.copyto(out, y_pred)
            return out
        else:
            return y_pred

    inverse = link


class LogLink(BaseLink):
    """The log link function g(x)=log(x)."""

    interval_y_pred = Interval(0, np.inf, False, False)

    def link(self, y_pred, out=None):
        return np.log(y_pred, out=out)

    def inverse(self, raw_prediction, out=None):
        return np.exp(raw_prediction, out=out)


class LogitLink(BaseLink):
    """The logit link function g(x)=logit(x)."""

    interval_y_pred = Interval(0, 1, False, False)

    def link(self, y_pred, out=None):
        return logit(y_pred, out=out)

    def inverse(self, raw_prediction, out=None):
        return expit(raw_prediction, out=out)


class HalfLogitLink(BaseLink):
    """Half the logit link function g(x)=1/2 * logit(x).

    Used for the exponential loss.
    """

    interval_y_pred = Interval(0, 1, False, False)

    def link(self, y_pred, out=None):
        out = logit(y_pred, out=out)
        out *= 0.5
        return out

    def inverse(self, raw_prediction, out=None):
        return expit(2 * raw_prediction, out)


class MultinomialLogit(BaseLink):
    """The symmetric multinomial logit function.

    Convention:
        - y_pred.shape = raw_prediction.shape = (n_samples, n_classes)

    Notes:
        - The inverse link h is the softmax function.
        - The sum is over the second axis, i.e. axis=1 (n_classes).

    We have to choose additional constraints in order to make

        y_pred[k] = exp(raw_pred[k]) / sum(exp(raw_pred[k]), k=0..n_classes-1)

    for n_classes classes identifiable and invertible.
    We choose the symmetric side constraint where the geometric mean response
    is set as reference category, see [2]:

    The symmetric multinomial logit link function for a single data point is
    then defined as

        raw_prediction[k] = g(y_pred[k]) = log(y_pred[k]/gmean(y_pred))
                          = log(y_pred[k]) - mean(log(y_pred)).

    Note that this is equivalent to the definition in [1] and implies mean
    centered raw predictions:

        sum(raw_prediction[k], k=0..n_classes-1) = 0.

    For linear models with raw_prediction = X @ coef, this corresponds to
    sum(coef[k], k=0..n_classes-1) = 0, i.e. the sum over classes for every
    feature is zero.

    Reference
    ---------
    .. [1] Friedman, Jerome; Hastie, Trevor; Tibshirani, Robert. "Additive
        logistic regression: a statistical view of boosting" Ann. Statist.
        28 (2000), no. 2, 337--407. doi:10.1214/aos/1016218223.
        https://projecteuclid.org/euclid.aos/1016218223

    .. [2] Zahid, Faisal Maqbool and Gerhard Tutz. "Ridge estimation for
        multinomial logit models with symmetric side constraints."
        Computational Statistics 28 (2013): 1017-1034.
        http://epub.ub.uni-muenchen.de/11001/1/tr067.pdf
    """

    is_multiclass = True
    interval_y_pred = Interval(0, 1, False, False)

    def symmetrize_raw_prediction(self, raw_prediction):
        return raw_prediction - np.mean(raw_prediction, axis=1)[:, np.newaxis]

    def link(self, y_pred, out=None):
        # geometric mean as reference category
        gm = gmean(y_pred, axis=1)
        return np.log(y_pred / gm[:, np.newaxis], out=out)

    def inverse(self, raw_prediction, out=None):
        if out is None:
            return softmax(raw_prediction, copy=True)
        else:
            np.copyto(out, raw_prediction)
            softmax(out, copy=False)
            return out


_LINKS = {
    "identity": IdentityLink,
    "log": LogLink,
    "logit": LogitLink,
    "half_logit": HalfLogitLink,
    "multinomial_logit": MultinomialLogit,
}
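A short sketch of how these link classes behave, assuming a scikit-learn installation (illustrative only; these are private classes):

    import numpy as np
    from sklearn._loss.link import Interval, LogitLink, MultinomialLogit

    # Usage sketch (private API, not part of this commit).
    link = LogitLink()
    y_pred = np.array([0.1, 0.5, 0.9])
    raw = link.link(y_pred)                        # logit(y_pred)
    print(np.allclose(link.inverse(raw), y_pred))  # True: expit inverts logit
    print(link.interval_y_pred.includes(y_pred))   # True: all inside (0, 1)
    print(Interval(0, 1, False, False).includes(np.array([0.0])))  # False: 0 excluded

    # Symmetric side constraint: raw predictions sum to zero over classes.
    ml = MultinomialLogit()
    p = np.array([[0.2, 0.3, 0.5]])
    print(ml.link(p).sum(axis=1))  # ~[0.]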
env-llmeval/lib/python3.10/site-packages/sklearn/_loss/loss.py
ADDED
@@ -0,0 +1,1177 @@
"""
This module contains loss classes suitable for fitting.

It is not part of the public API.
Specific losses are used for regression, binary classification or multiclass
classification.
"""
# Goals:
# - Provide a common private module for loss functions/classes.
# - To be used in:
#   - LogisticRegression
#   - PoissonRegressor, GammaRegressor, TweedieRegressor
#   - HistGradientBoostingRegressor, HistGradientBoostingClassifier
#   - GradientBoostingRegressor, GradientBoostingClassifier
#   - SGDRegressor, SGDClassifier
# - Replace link module of GLMs.

import numbers

import numpy as np
from scipy.special import xlogy

from ..utils import check_scalar
from ..utils.stats import _weighted_percentile
from ._loss import (
    CyAbsoluteError,
    CyExponentialLoss,
    CyHalfBinomialLoss,
    CyHalfGammaLoss,
    CyHalfMultinomialLoss,
    CyHalfPoissonLoss,
    CyHalfSquaredError,
    CyHalfTweedieLoss,
    CyHalfTweedieLossIdentity,
    CyHuberLoss,
    CyPinballLoss,
)
from .link import (
    HalfLogitLink,
    IdentityLink,
    Interval,
    LogitLink,
    LogLink,
    MultinomialLogit,
)


# Note: The shapes of raw_prediction for multiclass classification are
# - GradientBoostingClassifier: (n_samples, n_classes)
# - HistGradientBoostingClassifier: (n_classes, n_samples)
#
# Note: Instead of inheritance like
#
#     class BaseLoss(BaseLink, CyLossFunction):
#         ...
#
#     # Note: Naturally, we would inherit in the following order
#     #     class HalfSquaredError(IdentityLink, CyHalfSquaredError, BaseLoss)
#     # But because of https://github.com/cython/cython/issues/4350 we set BaseLoss as
#     # the last one. This, of course, changes the MRO.
#     class HalfSquaredError(IdentityLink, CyHalfSquaredError, BaseLoss):
#
# we use composition. This way we improve maintainability by avoiding the above
# mentioned Cython edge case and have easier to understand code (which method calls
# which code).
class BaseLoss:
    """Base class for a loss function of 1-dimensional targets.

    Conventions:

        - y_true.shape = sample_weight.shape = (n_samples,)
        - y_pred.shape = raw_prediction.shape = (n_samples,)
        - If is_multiclass is true (multiclass classification), then
          y_pred.shape = raw_prediction.shape = (n_samples, n_classes)
          Note that this corresponds to the return value of decision_function.

    y_true, y_pred, sample_weight and raw_prediction must either be all float64
    or all float32.
    gradient and hessian must be either both float64 or both float32.

    Note that y_pred = link.inverse(raw_prediction).

    Specific loss classes can inherit specific link classes to satisfy
    BaseLink's abstractmethods.

    Parameters
    ----------
    sample_weight : {None, ndarray}
        If sample_weight is None, the hessian might be constant.
    n_classes : {None, int}
        The number of classes for classification, else None.

    Attributes
    ----------
    closs: CyLossFunction
    link : BaseLink
    interval_y_true : Interval
        Valid interval for y_true
    interval_y_pred : Interval
        Valid Interval for y_pred
    differentiable : bool
        Indicates whether or not loss function is differentiable in
        raw_prediction everywhere.
    need_update_leaves_values : bool
        Indicates whether decision trees in gradient boosting need to update
        leaf values after having been fit to the (negative) gradients.
    approx_hessian : bool
        Indicates whether the hessian is approximated or exact. If
        approximated, it should be larger than or equal to the exact one.
    constant_hessian : bool
        Indicates whether the hessian is one for this loss.
    is_multiclass : bool
        Indicates whether n_classes > 2 is allowed.
    """

    # For gradient boosted decision trees:
    # This variable indicates whether the loss requires the leaves values to
    # be updated once the tree has been trained. The trees are trained to
    # predict a Newton-Raphson step (see grower._finalize_leaf()). But for
    # some losses (e.g. least absolute deviation) we need to adjust the tree
    # values to account for the "line search" of the gradient descent
    # procedure. See the original paper Greedy Function Approximation: A
    # Gradient Boosting Machine by Friedman
    # (https://statweb.stanford.edu/~jhf/ftp/trebst.pdf) for the theory.
    differentiable = True
    need_update_leaves_values = False
    is_multiclass = False

    def __init__(self, closs, link, n_classes=None):
        self.closs = closs
        self.link = link
        self.approx_hessian = False
        self.constant_hessian = False
        self.n_classes = n_classes
        self.interval_y_true = Interval(-np.inf, np.inf, False, False)
        self.interval_y_pred = self.link.interval_y_pred

    def in_y_true_range(self, y):
        """Return True if y is in the valid range of y_true.

        Parameters
        ----------
        y : ndarray
        """
        return self.interval_y_true.includes(y)

    def in_y_pred_range(self, y):
        """Return True if y is in the valid range of y_pred.

        Parameters
        ----------
        y : ndarray
        """
        return self.interval_y_pred.includes(y)

    def loss(
        self,
        y_true,
        raw_prediction,
        sample_weight=None,
        loss_out=None,
        n_threads=1,
    ):
        """Compute the pointwise loss value for each input.

        Parameters
        ----------
        y_true : C-contiguous array of shape (n_samples,)
            Observed, true target values.
        raw_prediction : C-contiguous array of shape (n_samples,) or array of \
            shape (n_samples, n_classes)
            Raw prediction values (in link space).
        sample_weight : None or C-contiguous array of shape (n_samples,)
            Sample weights.
        loss_out : None or C-contiguous array of shape (n_samples,)
            A location into which the result is stored. If None, a new array
            might be created.
        n_threads : int, default=1
            Might use openmp thread parallelism.

        Returns
        -------
        loss : array of shape (n_samples,)
            Element-wise loss function.
        """
        if loss_out is None:
            loss_out = np.empty_like(y_true)
        # Be graceful to shape (n_samples, 1) -> (n_samples,)
        if raw_prediction.ndim == 2 and raw_prediction.shape[1] == 1:
            raw_prediction = raw_prediction.squeeze(1)

        self.closs.loss(
            y_true=y_true,
            raw_prediction=raw_prediction,
            sample_weight=sample_weight,
            loss_out=loss_out,
            n_threads=n_threads,
        )
        return loss_out

    def loss_gradient(
        self,
        y_true,
        raw_prediction,
        sample_weight=None,
        loss_out=None,
        gradient_out=None,
        n_threads=1,
    ):
        """Compute loss and gradient w.r.t. raw_prediction for each input.

        Parameters
        ----------
        y_true : C-contiguous array of shape (n_samples,)
            Observed, true target values.
        raw_prediction : C-contiguous array of shape (n_samples,) or array of \
            shape (n_samples, n_classes)
            Raw prediction values (in link space).
        sample_weight : None or C-contiguous array of shape (n_samples,)
            Sample weights.
        loss_out : None or C-contiguous array of shape (n_samples,)
            A location into which the loss is stored. If None, a new array
            might be created.
        gradient_out : None or C-contiguous array of shape (n_samples,) or array \
            of shape (n_samples, n_classes)
            A location into which the gradient is stored. If None, a new array
            might be created.
        n_threads : int, default=1
            Might use openmp thread parallelism.

        Returns
        -------
        loss : array of shape (n_samples,)
            Element-wise loss function.

        gradient : array of shape (n_samples,) or (n_samples, n_classes)
            Element-wise gradients.
        """
        if loss_out is None:
            if gradient_out is None:
                loss_out = np.empty_like(y_true)
                gradient_out = np.empty_like(raw_prediction)
            else:
                loss_out = np.empty_like(y_true, dtype=gradient_out.dtype)
        elif gradient_out is None:
            gradient_out = np.empty_like(raw_prediction, dtype=loss_out.dtype)

        # Be graceful to shape (n_samples, 1) -> (n_samples,)
        if raw_prediction.ndim == 2 and raw_prediction.shape[1] == 1:
            raw_prediction = raw_prediction.squeeze(1)
        if gradient_out.ndim == 2 and gradient_out.shape[1] == 1:
            gradient_out = gradient_out.squeeze(1)

        self.closs.loss_gradient(
            y_true=y_true,
            raw_prediction=raw_prediction,
            sample_weight=sample_weight,
            loss_out=loss_out,
            gradient_out=gradient_out,
            n_threads=n_threads,
        )
        return loss_out, gradient_out

    def gradient(
        self,
        y_true,
        raw_prediction,
        sample_weight=None,
        gradient_out=None,
        n_threads=1,
    ):
        """Compute gradient of loss w.r.t raw_prediction for each input.

        Parameters
        ----------
        y_true : C-contiguous array of shape (n_samples,)
            Observed, true target values.
        raw_prediction : C-contiguous array of shape (n_samples,) or array of \
            shape (n_samples, n_classes)
            Raw prediction values (in link space).
        sample_weight : None or C-contiguous array of shape (n_samples,)
            Sample weights.
        gradient_out : None or C-contiguous array of shape (n_samples,) or array \
            of shape (n_samples, n_classes)
            A location into which the result is stored. If None, a new array
            might be created.
        n_threads : int, default=1
            Might use openmp thread parallelism.

        Returns
        -------
        gradient : array of shape (n_samples,) or (n_samples, n_classes)
            Element-wise gradients.
        """
        if gradient_out is None:
            gradient_out = np.empty_like(raw_prediction)

        # Be graceful to shape (n_samples, 1) -> (n_samples,)
        if raw_prediction.ndim == 2 and raw_prediction.shape[1] == 1:
            raw_prediction = raw_prediction.squeeze(1)
        if gradient_out.ndim == 2 and gradient_out.shape[1] == 1:
            gradient_out = gradient_out.squeeze(1)

        self.closs.gradient(
            y_true=y_true,
            raw_prediction=raw_prediction,
            sample_weight=sample_weight,
            gradient_out=gradient_out,
            n_threads=n_threads,
        )
        return gradient_out

    def gradient_hessian(
        self,
        y_true,
        raw_prediction,
        sample_weight=None,
        gradient_out=None,
        hessian_out=None,
        n_threads=1,
    ):
        """Compute gradient and hessian of loss w.r.t raw_prediction.

        Parameters
        ----------
        y_true : C-contiguous array of shape (n_samples,)
            Observed, true target values.
        raw_prediction : C-contiguous array of shape (n_samples,) or array of \
            shape (n_samples, n_classes)
            Raw prediction values (in link space).
        sample_weight : None or C-contiguous array of shape (n_samples,)
            Sample weights.
        gradient_out : None or C-contiguous array of shape (n_samples,) or array \
            of shape (n_samples, n_classes)
            A location into which the gradient is stored. If None, a new array
            might be created.
        hessian_out : None or C-contiguous array of shape (n_samples,) or array \
            of shape (n_samples, n_classes)
            A location into which the hessian is stored. If None, a new array
            might be created.
        n_threads : int, default=1
            Might use openmp thread parallelism.

        Returns
        -------
        gradient : arrays of shape (n_samples,) or (n_samples, n_classes)
            Element-wise gradients.

        hessian : arrays of shape (n_samples,) or (n_samples, n_classes)
            Element-wise hessians.
        """
        if gradient_out is None:
            if hessian_out is None:
                gradient_out = np.empty_like(raw_prediction)
                hessian_out = np.empty_like(raw_prediction)
            else:
                gradient_out = np.empty_like(hessian_out)
        elif hessian_out is None:
            hessian_out = np.empty_like(gradient_out)

        # Be graceful to shape (n_samples, 1) -> (n_samples,)
        if raw_prediction.ndim == 2 and raw_prediction.shape[1] == 1:
            raw_prediction = raw_prediction.squeeze(1)
        if gradient_out.ndim == 2 and gradient_out.shape[1] == 1:
            gradient_out = gradient_out.squeeze(1)
        if hessian_out.ndim == 2 and hessian_out.shape[1] == 1:
            hessian_out = hessian_out.squeeze(1)

        self.closs.gradient_hessian(
            y_true=y_true,
            raw_prediction=raw_prediction,
            sample_weight=sample_weight,
            gradient_out=gradient_out,
            hessian_out=hessian_out,
            n_threads=n_threads,
        )
        return gradient_out, hessian_out

    def __call__(self, y_true, raw_prediction, sample_weight=None, n_threads=1):
        """Compute the weighted average loss.

        Parameters
        ----------
        y_true : C-contiguous array of shape (n_samples,)
            Observed, true target values.
        raw_prediction : C-contiguous array of shape (n_samples,) or array of \
            shape (n_samples, n_classes)
            Raw prediction values (in link space).
        sample_weight : None or C-contiguous array of shape (n_samples,)
            Sample weights.
        n_threads : int, default=1
            Might use openmp thread parallelism.

        Returns
        -------
        loss : float
            Mean or averaged loss function.
        """
        return np.average(
            self.loss(
                y_true=y_true,
                raw_prediction=raw_prediction,
                sample_weight=None,
                loss_out=None,
                n_threads=n_threads,
            ),
            weights=sample_weight,
        )

    def fit_intercept_only(self, y_true, sample_weight=None):
        """Compute raw_prediction of an intercept-only model.

        This can be used as initial estimates of predictions, i.e. before the
        first iteration in fit.

        Parameters
        ----------
        y_true : array-like of shape (n_samples,)
            Observed, true target values.
        sample_weight : None or array of shape (n_samples,)
            Sample weights.

        Returns
        -------
        raw_prediction : numpy scalar or array of shape (n_classes,)
            Raw predictions of an intercept-only model.
        """
        # As default, take weighted average of the target over the samples
        # axis=0 and then transform into link-scale (raw_prediction).
        y_pred = np.average(y_true, weights=sample_weight, axis=0)
        eps = 10 * np.finfo(y_pred.dtype).eps

        if self.interval_y_pred.low == -np.inf:
            a_min = None
        elif self.interval_y_pred.low_inclusive:
            a_min = self.interval_y_pred.low
        else:
            a_min = self.interval_y_pred.low + eps

        if self.interval_y_pred.high == np.inf:
            a_max = None
        elif self.interval_y_pred.high_inclusive:
            a_max = self.interval_y_pred.high
        else:
            a_max = self.interval_y_pred.high - eps

        if a_min is None and a_max is None:
            return self.link.link(y_pred)
        else:
            return self.link.link(np.clip(y_pred, a_min, a_max))

    def constant_to_optimal_zero(self, y_true, sample_weight=None):
        """Calculate term dropped in loss.

        With this term added, the loss of perfect predictions is zero.
        """
        return np.zeros_like(y_true)

    def init_gradient_and_hessian(self, n_samples, dtype=np.float64, order="F"):
        """Initialize arrays for gradients and hessians.

        Unless hessians are constant, arrays are initialized with undefined values.

        Parameters
        ----------
        n_samples : int
            The number of samples, usually passed to `fit()`.
        dtype : {np.float64, np.float32}, default=np.float64
            The dtype of the arrays gradient and hessian.
        order : {'C', 'F'}, default='F'
            Order of the arrays gradient and hessian. The default 'F' makes the arrays
            contiguous along samples.

        Returns
        -------
        gradient : C-contiguous array of shape (n_samples,) or array of shape \
            (n_samples, n_classes)
            Empty array (allocated but not initialized) to be used as argument
            gradient_out.
        hessian : C-contiguous array of shape (n_samples,), array of shape \
            (n_samples, n_classes) or shape (1,)
            Empty (allocated but not initialized) array to be used as argument
            hessian_out.
            If constant_hessian is True (e.g. `HalfSquaredError`), the array is
            initialized to ``1``.
        """
        if dtype not in (np.float32, np.float64):
            raise ValueError(
                "Valid options for 'dtype' are np.float32 and np.float64. "
                f"Got dtype={dtype} instead."
            )

        if self.is_multiclass:
            shape = (n_samples, self.n_classes)
        else:
            shape = (n_samples,)
        gradient = np.empty(shape=shape, dtype=dtype, order=order)

        if self.constant_hessian:
            # If the hessians are constant, we consider them equal to 1.
            # - This is correct for HalfSquaredError
            # - For AbsoluteError, hessians are actually 0, but they are
            #   always ignored anyway.
            hessian = np.ones(shape=(1,), dtype=dtype)
        else:
            hessian = np.empty(shape=shape, dtype=dtype, order=order)

        return gradient, hessian
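The intended calling pattern of `BaseLoss` looks roughly as follows, using `HalfPoissonLoss` (defined further below in this file). A sketch against the private API, illustrative only:

    import numpy as np
    from sklearn._loss.loss import HalfPoissonLoss

    # Workflow sketch (private API, not part of this commit).
    loss = HalfPoissonLoss()
    y_true = np.array([0.0, 1.0, 3.0])

    # Preallocate output buffers, then fill them in place.
    gradient, hessian = loss.init_gradient_and_hessian(n_samples=y_true.shape[0])
    raw = np.full_like(y_true, loss.fit_intercept_only(y_true))
    loss.gradient_hessian(
        y_true=y_true, raw_prediction=raw,
        gradient_out=gradient, hessian_out=hessian,
    )
    print(gradient.mean())  # ~0: the intercept-only fit is already optimal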

# Note: Naturally, we would inherit in the following order
#     class HalfSquaredError(IdentityLink, CyHalfSquaredError, BaseLoss)
# But because of https://github.com/cython/cython/issues/4350 we
# set BaseLoss as the last one. This, of course, changes the MRO.
class HalfSquaredError(BaseLoss):
    """Half squared error with identity link, for regression.

    Domain:
    y_true and y_pred all real numbers

    Link:
    y_pred = raw_prediction

    For a given sample x_i, half squared error is defined as::

        loss(x_i) = 0.5 * (y_true_i - raw_prediction_i)**2

    The factor of 0.5 simplifies the computation of gradients and results in a
    unit hessian (and is consistent with what is done in LightGBM). It is also
    half the Normal distribution deviance.
    """

    def __init__(self, sample_weight=None):
        super().__init__(closs=CyHalfSquaredError(), link=IdentityLink())
        self.constant_hessian = sample_weight is None

class AbsoluteError(BaseLoss):
    """Absolute error with identity link, for regression.

    Domain:
    y_true and y_pred all real numbers

    Link:
    y_pred = raw_prediction

    For a given sample x_i, the absolute error is defined as::

        loss(x_i) = |y_true_i - raw_prediction_i|

    Note that the exact hessian = 0 almost everywhere (except at one point, therefore
    differentiable = False). Optimization routines like in HGBT, however, need a
    hessian > 0. Therefore, we assign 1.
    """

    differentiable = False
    need_update_leaves_values = True

    def __init__(self, sample_weight=None):
        super().__init__(closs=CyAbsoluteError(), link=IdentityLink())
        self.approx_hessian = True
        self.constant_hessian = sample_weight is None

    def fit_intercept_only(self, y_true, sample_weight=None):
        """Compute raw_prediction of an intercept-only model.

        This is the weighted median of the target, i.e. over the samples
        axis=0.
        """
        if sample_weight is None:
            return np.median(y_true, axis=0)
        else:
            return _weighted_percentile(y_true, sample_weight, 50)

class PinballLoss(BaseLoss):
    """Quantile loss aka pinball loss, for regression.

    Domain:
    y_true and y_pred all real numbers
    quantile in (0, 1)

    Link:
    y_pred = raw_prediction

    For a given sample x_i, the pinball loss is defined as::

        loss(x_i) = rho_{quantile}(y_true_i - raw_prediction_i)

        rho_{quantile}(u) = u * (quantile - 1_{u<0})
                          = -u * (1 - quantile)  if u < 0
                             u * quantile        if u >= 0

    Note: 2 * PinballLoss(quantile=0.5) equals AbsoluteError().

    Note that the exact hessian = 0 almost everywhere (except at one point, therefore
    differentiable = False). Optimization routines like in HGBT, however, need a
    hessian > 0. Therefore, we assign 1.

    Additional Attributes
    ---------------------
    quantile : float
        The quantile level of the quantile to be estimated. Must be in range (0, 1).
    """

    differentiable = False
    need_update_leaves_values = True

    def __init__(self, sample_weight=None, quantile=0.5):
        check_scalar(
            quantile,
            "quantile",
            target_type=numbers.Real,
            min_val=0,
            max_val=1,
            include_boundaries="neither",
        )
        super().__init__(
            closs=CyPinballLoss(quantile=float(quantile)),
            link=IdentityLink(),
        )
        self.approx_hessian = True
        self.constant_hessian = sample_weight is None

    def fit_intercept_only(self, y_true, sample_weight=None):
        """Compute raw_prediction of an intercept-only model.

        This is the weighted quantile (at level `quantile`) of the target,
        i.e. over the samples axis=0.
        """
        if sample_weight is None:
            return np.percentile(y_true, 100 * self.closs.quantile, axis=0)
        else:
            return _weighted_percentile(
                y_true, sample_weight, 100 * self.closs.quantile
            )
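A worked check of the rho function above (plain NumPy, independent of the class): with quantile=0.8, positive residuals (under-prediction) cost four times as much as negative ones.

    import numpy as np

    def rho(u, quantile):
        # rho_{quantile}(u) = u * (quantile - 1_{u<0})
        return u * (quantile - (u < 0))

    print(rho(np.array([1.0]), 0.8))   # [0.8]  u >= 0: u * quantile
    print(rho(np.array([-1.0]), 0.8))  # [0.2]  u < 0: -u * (1 - quantile)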

class HuberLoss(BaseLoss):
    """Huber loss, for regression.

    Domain:
    y_true and y_pred all real numbers
    quantile in (0, 1)

    Link:
    y_pred = raw_prediction

    For a given sample x_i, the Huber loss is defined as::

        loss(x_i) = 1/2 * abserr**2            if abserr <= delta
                    delta * (abserr - delta/2) if abserr > delta

        abserr = |y_true_i - raw_prediction_i|
        delta = quantile(abserr, self.quantile)

    Note: HuberLoss(quantile=1) equals HalfSquaredError and HuberLoss(quantile=0)
    equals delta * (AbsoluteError() - delta/2).

    Additional Attributes
    ---------------------
    quantile : float
        The quantile level which defines the breaking point `delta` to distinguish
        between absolute error and squared error. Must be in range (0, 1).

    Reference
    ---------
    .. [1] Friedman, J.H. (2001). :doi:`Greedy function approximation: A gradient
        boosting machine <10.1214/aos/1013203451>`.
        Annals of Statistics, 29, 1189-1232.
    """

    differentiable = False
    need_update_leaves_values = True

    def __init__(self, sample_weight=None, quantile=0.9, delta=0.5):
        check_scalar(
            quantile,
            "quantile",
            target_type=numbers.Real,
            min_val=0,
            max_val=1,
            include_boundaries="neither",
        )
        self.quantile = quantile  # This is better stored outside of Cython.
        super().__init__(
            closs=CyHuberLoss(delta=float(delta)),
            link=IdentityLink(),
        )
        self.approx_hessian = True
        self.constant_hessian = False

    def fit_intercept_only(self, y_true, sample_weight=None):
        """Compute raw_prediction of an intercept-only model.

        This starts from the weighted median of the target (over the samples
        axis=0) and adds a delta-clipped correction term.
        """
        # See formula before algo 4 in Friedman (2001), but we apply it to y_true,
        # not to the residual y_true - raw_prediction. An estimator like
        # HistGradientBoostingRegressor might then call it on the residual, e.g.
        # fit_intercept_only(y_true - raw_prediction).
        if sample_weight is None:
            median = np.percentile(y_true, 50, axis=0)
        else:
            median = _weighted_percentile(y_true, sample_weight, 50)
        diff = y_true - median
        term = np.sign(diff) * np.minimum(self.closs.delta, np.abs(diff))
        return median + np.average(term, weights=sample_weight)
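The piecewise definition above can be checked directly with a fixed `delta` (a plain NumPy sketch; in the class itself `delta` is derived from a quantile of the absolute residuals):

    import numpy as np

    def huber(y_true, raw_prediction, delta):
        abserr = np.abs(y_true - raw_prediction)
        # Quadratic inside delta, linear (with matching value and slope) outside.
        return np.where(
            abserr <= delta, 0.5 * abserr**2, delta * (abserr - 0.5 * delta)
        )

    print(huber(np.array([0.0, 0.0]), np.array([0.3, 3.0]), delta=0.5))
    # [0.045 1.375]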

class HalfPoissonLoss(BaseLoss):
    """Half Poisson deviance loss with log-link, for regression.

    Domain:
    y_true in non-negative real numbers
    y_pred in positive real numbers

    Link:
    y_pred = exp(raw_prediction)

    For a given sample x_i, half the Poisson deviance is defined as::

        loss(x_i) = y_true_i * log(y_true_i/exp(raw_prediction_i))
                    - y_true_i + exp(raw_prediction_i)

    Half the Poisson deviance is actually the negative log-likelihood up to
    constant terms (not involving raw_prediction) and simplifies the
    computation of the gradients.
    We also skip the constant term `y_true_i * log(y_true_i) - y_true_i`.
    """

    def __init__(self, sample_weight=None):
        super().__init__(closs=CyHalfPoissonLoss(), link=LogLink())
        self.interval_y_true = Interval(0, np.inf, True, False)

    def constant_to_optimal_zero(self, y_true, sample_weight=None):
        term = xlogy(y_true, y_true) - y_true
        if sample_weight is not None:
            term *= sample_weight
        return term
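Because the constant term is skipped, adding `constant_to_optimal_zero` back recovers half the full Poisson deviance, which vanishes for perfect predictions. A quick check (illustrative, private API):

    import numpy as np
    from sklearn._loss.loss import HalfPoissonLoss

    # Sketch (private API, not part of this commit).
    loss = HalfPoissonLoss()
    y_true = np.array([1.0, 2.0, 4.0])
    raw = loss.link.link(y_true)  # perfect predictions, mapped to link space

    full_deviance = loss.loss(y_true=y_true, raw_prediction=raw)
    full_deviance += loss.constant_to_optimal_zero(y_true)
    print(np.allclose(full_deviance, 0.0))  # True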

class HalfGammaLoss(BaseLoss):
    """Half Gamma deviance loss with log-link, for regression.

    Domain:
    y_true and y_pred in positive real numbers

    Link:
    y_pred = exp(raw_prediction)

    For a given sample x_i, half Gamma deviance loss is defined as::

        loss(x_i) = log(exp(raw_prediction_i)/y_true_i)
                    + y_true_i/exp(raw_prediction_i) - 1

    Half the Gamma deviance is actually proportional to the negative log-
    likelihood up to constant terms (not involving raw_prediction) and
    simplifies the computation of the gradients.
    We also skip the constant term `-log(y_true_i) - 1`.
    """

    def __init__(self, sample_weight=None):
        super().__init__(closs=CyHalfGammaLoss(), link=LogLink())
        self.interval_y_true = Interval(0, np.inf, False, False)

    def constant_to_optimal_zero(self, y_true, sample_weight=None):
        term = -np.log(y_true) - 1
        if sample_weight is not None:
            term *= sample_weight
        return term
775 |
+
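# ---------------------------------------------------------------------------
# Editorial sketch (not part of the original file): as for Poisson above, the
# full half Gamma deviance log(mu/y) + y/mu - 1 is zero at mu = y.
# `_demo_half_gamma_deviance` is a hypothetical helper for illustration.
import numpy as np


def _demo_half_gamma_deviance(y_true, raw_prediction):
    mu = np.exp(raw_prediction)  # log link
    return np.log(mu / y_true) + y_true / mu - 1


# _demo_half_gamma_deviance(np.array([2.0]), np.log([2.0])) -> [0.]
# ---------------------------------------------------------------------------
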
class HalfTweedieLoss(BaseLoss):
    """Half Tweedie deviance loss with log-link, for regression.

    Domain:
    y_true in real numbers for power <= 0
    y_true in non-negative real numbers for 0 < power < 2
    y_true in positive real numbers for 2 <= power
    y_pred in positive real numbers
    power in real numbers

    Link:
    y_pred = exp(raw_prediction)

    For a given sample x_i, half Tweedie deviance loss with p=power is defined
    as::

        loss(x_i) = max(y_true_i, 0)**(2-p) / (1-p) / (2-p)
                    - y_true_i * exp(raw_prediction_i)**(1-p) / (1-p)
                    + exp(raw_prediction_i)**(2-p) / (2-p)

    Taking the limits for p=0, 1, 2 gives HalfSquaredError with a log link,
    HalfPoissonLoss and HalfGammaLoss.

    We also skip constant terms, but those are different for p=0, 1, 2.
    Therefore, the loss is not continuous in `power`.

    Note furthermore that although no Tweedie distribution exists for
    0 < power < 1, it still gives a strictly consistent scoring function for
    the expectation.
    """

    def __init__(self, sample_weight=None, power=1.5):
        super().__init__(
            closs=CyHalfTweedieLoss(power=float(power)),
            link=LogLink(),
        )
        if self.closs.power <= 0:
            self.interval_y_true = Interval(-np.inf, np.inf, False, False)
        elif self.closs.power < 2:
            self.interval_y_true = Interval(0, np.inf, True, False)
        else:
            self.interval_y_true = Interval(0, np.inf, False, False)

    def constant_to_optimal_zero(self, y_true, sample_weight=None):
        if self.closs.power == 0:
            return HalfSquaredError().constant_to_optimal_zero(
                y_true=y_true, sample_weight=sample_weight
            )
        elif self.closs.power == 1:
            return HalfPoissonLoss().constant_to_optimal_zero(
                y_true=y_true, sample_weight=sample_weight
            )
        elif self.closs.power == 2:
            return HalfGammaLoss().constant_to_optimal_zero(
                y_true=y_true, sample_weight=sample_weight
            )
        else:
            p = self.closs.power
            term = np.power(np.maximum(y_true, 0), 2 - p) / (1 - p) / (2 - p)
            if sample_weight is not None:
                term *= sample_weight
            return term

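# ---------------------------------------------------------------------------
# Editorial sketch (not part of the original file): the full Tweedie deviance
# (loss plus constant_to_optimal_zero) is continuous in power, even though the
# truncated loss above is not. A hypothetical helper evaluating the generic
# p-formula shows that p -> 1 recovers the Poisson half deviance.
import numpy as np


def _demo_full_half_tweedie(y, mu, p):
    # max(y, 0)**(2-p)/((1-p)(2-p)) - y*mu**(1-p)/(1-p) + mu**(2-p)/(2-p)
    return (
        np.maximum(y, 0) ** (2 - p) / ((1 - p) * (2 - p))
        - y * mu ** (1 - p) / (1 - p)
        + mu ** (2 - p) / (2 - p)
    )


# With y=2, mu=4: the Poisson limit is 2*log(0.5) - 2 + 4 ~= 0.61371, and
# _demo_full_half_tweedie(2.0, 4.0, 1 + 1e-6) agrees to about six digits.
# ---------------------------------------------------------------------------
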
class HalfTweedieLossIdentity(BaseLoss):
    """Half Tweedie deviance loss with identity link, for regression.

    Domain:
    y_true in real numbers for power <= 0
    y_true in non-negative real numbers for 0 < power < 2
    y_true in positive real numbers for 2 <= power
    y_pred in positive real numbers for power != 0
    y_pred in real numbers for power = 0
    power in real numbers

    Link:
    y_pred = raw_prediction

    For a given sample x_i, half Tweedie deviance loss with p=power is defined
    as::

        loss(x_i) = max(y_true_i, 0)**(2-p) / (1-p) / (2-p)
                    - y_true_i * raw_prediction_i**(1-p) / (1-p)
                    + raw_prediction_i**(2-p) / (2-p)

    Note that the minimum value of this loss is 0.

    Note furthermore that although no Tweedie distribution exists for
    0 < power < 1, it still gives a strictly consistent scoring function for
    the expectation.
    """

    def __init__(self, sample_weight=None, power=1.5):
        super().__init__(
            closs=CyHalfTweedieLossIdentity(power=float(power)),
            link=IdentityLink(),
        )
        if self.closs.power <= 0:
            self.interval_y_true = Interval(-np.inf, np.inf, False, False)
        elif self.closs.power < 2:
            self.interval_y_true = Interval(0, np.inf, True, False)
        else:
            self.interval_y_true = Interval(0, np.inf, False, False)

        if self.closs.power == 0:
            self.interval_y_pred = Interval(-np.inf, np.inf, False, False)
        else:
            self.interval_y_pred = Interval(0, np.inf, False, False)

class HalfBinomialLoss(BaseLoss):
    """Half Binomial deviance loss with logit link, for binary classification.

    This is also known as binary cross entropy, log-loss and logistic loss.

    Domain:
    y_true in [0, 1], i.e. regression on the unit interval
    y_pred in (0, 1), i.e. boundaries excluded

    Link:
    y_pred = expit(raw_prediction)

    For a given sample x_i, half Binomial deviance is defined as the negative
    log-likelihood of the Binomial/Bernoulli distribution and can be expressed
    as::

        loss(x_i) = log(1 + exp(raw_pred_i)) - y_true_i * raw_pred_i

    See The Elements of Statistical Learning, by Hastie, Tibshirani, Friedman,
    section 4.4.1 (about logistic regression).

    Note that the formulation works for classification, y = {0, 1}, as well as
    logistic regression, y = [0, 1].
    If you add `constant_to_optimal_zero` to the loss, you get half the
    Bernoulli/binomial deviance.

    More details: Inserting the predicted probability y_pred = expit(raw_prediction)
    in the loss gives the well known::

        loss(x_i) = - y_true_i * log(y_pred_i) - (1 - y_true_i) * log(1 - y_pred_i)
    """

    def __init__(self, sample_weight=None):
        super().__init__(
            closs=CyHalfBinomialLoss(),
            link=LogitLink(),
            n_classes=2,
        )
        self.interval_y_true = Interval(0, 1, True, True)

    def constant_to_optimal_zero(self, y_true, sample_weight=None):
        # This is non-zero only if y_true is neither 0 nor 1.
        term = xlogy(y_true, y_true) + xlogy(1 - y_true, 1 - y_true)
        if sample_weight is not None:
            term *= sample_weight
        return term

    def predict_proba(self, raw_prediction):
        """Predict probabilities.

        Parameters
        ----------
        raw_prediction : array of shape (n_samples,) or (n_samples, 1)
            Raw prediction values (in link space).

        Returns
        -------
        proba : array of shape (n_samples, 2)
            Element-wise class probabilities.
        """
        # Be graceful to shape (n_samples, 1) -> (n_samples,)
        if raw_prediction.ndim == 2 and raw_prediction.shape[1] == 1:
            raw_prediction = raw_prediction.squeeze(1)
        proba = np.empty((raw_prediction.shape[0], 2), dtype=raw_prediction.dtype)
        proba[:, 1] = self.link.inverse(raw_prediction)
        proba[:, 0] = 1 - proba[:, 1]
        return proba

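# ---------------------------------------------------------------------------
# Editorial sketch (not part of the original file): the docstring formula
# log(1 + exp(raw)) - y*raw can be evaluated stably with np.logaddexp, which
# is how one would reproduce the loss without the Cython backend. The helper
# name is hypothetical.
import numpy as np
from scipy.special import expit


def _demo_half_binomial_loss(y_true, raw_prediction):
    # logaddexp(0, x) == log(1 + exp(x)) without overflow for large |x|.
    return np.logaddexp(0.0, raw_prediction) - y_true * raw_prediction


# predict_proba above amounts to stacking expit(raw) and 1 - expit(raw):
# expit(np.array([0.0, 37.0])) stays finite even for large raw values.
# ---------------------------------------------------------------------------
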
class HalfMultinomialLoss(BaseLoss):
    """Categorical cross-entropy loss, for multiclass classification.

    Domain:
    y_true in {0, 1, 2, 3, .., n_classes - 1}
    y_pred has n_classes elements, each element in (0, 1)

    Link:
    y_pred = softmax(raw_prediction)

    Note: We assume y_true to be already label encoded. The inverse link is
    softmax. But the full link function is the symmetric multinomial logit
    function.

    For a given sample x_i, the categorical cross-entropy loss is defined as
    the negative log-likelihood of the multinomial distribution; it
    generalizes the binary cross-entropy to more than 2 classes::

        loss_i = log(sum(exp(raw_pred_{i, k}), k=0..n_classes-1))
                 - sum(y_true_{i, k} * raw_pred_{i, k}, k=0..n_classes-1)

    See [1].

    Note that for the hessian, we calculate only the diagonal part in the
    classes: If the full hessian for classes k and l and sample i is H_i_k_l,
    we calculate H_i_k_k, i.e. k=l.

    Reference
    ---------
    .. [1] :arxiv:`Simon, Noah, J. Friedman and T. Hastie.
        "A Blockwise Descent Algorithm for Group-penalized Multiresponse and
        Multinomial Regression".
        <1311.6529>`
    """

    is_multiclass = True

    def __init__(self, sample_weight=None, n_classes=3):
        super().__init__(
            closs=CyHalfMultinomialLoss(),
            link=MultinomialLogit(),
            n_classes=n_classes,
        )
        self.interval_y_true = Interval(0, np.inf, True, False)
        self.interval_y_pred = Interval(0, 1, False, False)

    def in_y_true_range(self, y):
        """Return True if y is in the valid range of y_true.

        Parameters
        ----------
        y : ndarray
        """
        return self.interval_y_true.includes(y) and np.all(y.astype(int) == y)

    def fit_intercept_only(self, y_true, sample_weight=None):
        """Compute raw_prediction of an intercept-only model.

        This is the softmax of the weighted average of the target, i.e. over
        the samples axis=0.
        """
        out = np.zeros(self.n_classes, dtype=y_true.dtype)
        eps = np.finfo(y_true.dtype).eps
        for k in range(self.n_classes):
            out[k] = np.average(y_true == k, weights=sample_weight, axis=0)
            out[k] = np.clip(out[k], eps, 1 - eps)
        return self.link.link(out[None, :]).reshape(-1)

    def predict_proba(self, raw_prediction):
        """Predict probabilities.

        Parameters
        ----------
        raw_prediction : array of shape (n_samples, n_classes)
            Raw prediction values (in link space).

        Returns
        -------
        proba : array of shape (n_samples, n_classes)
            Element-wise class probabilities.
        """
        return self.link.inverse(raw_prediction)

    def gradient_proba(
        self,
        y_true,
        raw_prediction,
        sample_weight=None,
        gradient_out=None,
        proba_out=None,
        n_threads=1,
    ):
        """Compute gradient and class probabilities for raw_prediction.

        Parameters
        ----------
        y_true : C-contiguous array of shape (n_samples,)
            Observed, true target values.
        raw_prediction : array of shape (n_samples, n_classes)
            Raw prediction values (in link space).
        sample_weight : None or C-contiguous array of shape (n_samples,)
            Sample weights.
        gradient_out : None or array of shape (n_samples, n_classes)
            A location into which the gradient is stored. If None, a new array
            might be created.
        proba_out : None or array of shape (n_samples, n_classes)
            A location into which the class probabilities are stored. If None,
            a new array might be created.
        n_threads : int, default=1
            Might use openmp thread parallelism.

        Returns
        -------
        gradient : array of shape (n_samples, n_classes)
            Element-wise gradients.

        proba : array of shape (n_samples, n_classes)
            Element-wise class probabilities.
        """
        if gradient_out is None:
            if proba_out is None:
                gradient_out = np.empty_like(raw_prediction)
                proba_out = np.empty_like(raw_prediction)
            else:
                gradient_out = np.empty_like(proba_out)
        elif proba_out is None:
            proba_out = np.empty_like(gradient_out)

        self.closs.gradient_proba(
            y_true=y_true,
            raw_prediction=raw_prediction,
            sample_weight=sample_weight,
            gradient_out=gradient_out,
            proba_out=proba_out,
            n_threads=n_threads,
        )
        return gradient_out, proba_out

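# ---------------------------------------------------------------------------
# Editorial sketch (not part of the original file): the per-sample loss in the
# docstring is logsumexp(raw_i) minus the raw score of the true class,
# assuming label-encoded y_true as stated above. Hypothetical helper:
import numpy as np
from scipy.special import logsumexp


def _demo_multinomial_loss(y_true, raw_prediction):
    # loss_i = log(sum_k exp(raw_{i,k})) - raw_{i, y_i}
    n_samples = raw_prediction.shape[0]
    return logsumexp(raw_prediction, axis=1) - raw_prediction[
        np.arange(n_samples), y_true.astype(int)
    ]


# _demo_multinomial_loss(np.array([1.0]), np.array([[0.2, 0.5, 0.3]]))
# -> logsumexp([0.2, 0.5, 0.3]) - 0.5, matching the test case in
# test_loss.py further below.
# ---------------------------------------------------------------------------
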
class ExponentialLoss(BaseLoss):
    """Exponential loss with (half) logit link, for binary classification.

    This is also known as boosting loss.

    Domain:
    y_true in [0, 1], i.e. regression on the unit interval
    y_pred in (0, 1), i.e. boundaries excluded

    Link:
    y_pred = expit(2 * raw_prediction)

    For a given sample x_i, the exponential loss is defined as::

        loss(x_i) = y_true_i * exp(-raw_pred_i) + (1 - y_true_i) * exp(raw_pred_i)

    See:
    - J. Friedman, T. Hastie, R. Tibshirani.
      "Additive logistic regression: a statistical view of boosting (With discussion
      and a rejoinder by the authors)." Ann. Statist. 28 (2) 337 - 407, April 2000.
      https://doi.org/10.1214/aos/1016218223
    - A. Buja, W. Stuetzle, Y. Shen. (2005).
      "Loss Functions for Binary Class Probability Estimation and Classification:
      Structure and Applications."

    Note that the formulation works for classification, y = {0, 1}, as well as
    "exponential logistic" regression, y = [0, 1].
    Note that this is a proper scoring rule, but without its canonical link.

    More details: Inserting the predicted probability
    y_pred = expit(2 * raw_prediction) in the loss gives::

        loss(x_i) = y_true_i * sqrt((1 - y_pred_i) / y_pred_i)
                    + (1 - y_true_i) * sqrt(y_pred_i / (1 - y_pred_i))
    """

    def __init__(self, sample_weight=None):
        super().__init__(
            closs=CyExponentialLoss(),
            link=HalfLogitLink(),
            n_classes=2,
        )
        self.interval_y_true = Interval(0, 1, True, True)

    def constant_to_optimal_zero(self, y_true, sample_weight=None):
        # This is non-zero only if y_true is neither 0 nor 1.
        term = -2 * np.sqrt(y_true * (1 - y_true))
        if sample_weight is not None:
            term *= sample_weight
        return term

    def predict_proba(self, raw_prediction):
        """Predict probabilities.

        Parameters
        ----------
        raw_prediction : array of shape (n_samples,) or (n_samples, 1)
            Raw prediction values (in link space).

        Returns
        -------
        proba : array of shape (n_samples, 2)
            Element-wise class probabilities.
        """
        # Be graceful to shape (n_samples, 1) -> (n_samples,)
        if raw_prediction.ndim == 2 and raw_prediction.shape[1] == 1:
            raw_prediction = raw_prediction.squeeze(1)
        proba = np.empty((raw_prediction.shape[0], 2), dtype=raw_prediction.dtype)
        proba[:, 1] = self.link.inverse(raw_prediction)
        proba[:, 0] = 1 - proba[:, 1]
        return proba

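# ---------------------------------------------------------------------------
# Editorial sketch (not part of the original file): minimizing the pointwise
# expected exponential loss q*exp(-raw) + (1 - q)*exp(raw) over raw gives
# raw = 0.5 * log(q / (1 - q)), i.e. half the logit -- the reason this class
# pairs the loss with HalfLogitLink. Hypothetical numerical check:
import numpy as np
from scipy.optimize import minimize_scalar


def _demo_exponential_loss_minimizer(q=0.8):
    # q is a hypothetical true probability; both returned values ~= 0.6931.
    res = minimize_scalar(lambda raw: q * np.exp(-raw) + (1 - q) * np.exp(raw))
    return res.x, 0.5 * np.log(q / (1 - q))
# ---------------------------------------------------------------------------
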
_LOSSES = {
    "squared_error": HalfSquaredError,
    "absolute_error": AbsoluteError,
    "pinball_loss": PinballLoss,
    "huber_loss": HuberLoss,
    "poisson_loss": HalfPoissonLoss,
    "gamma_loss": HalfGammaLoss,
    "tweedie_loss": HalfTweedieLoss,
    "binomial_loss": HalfBinomialLoss,
    "multinomial_loss": HalfMultinomialLoss,
    "exponential_loss": ExponentialLoss,
}
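# ---------------------------------------------------------------------------
# Editorial sketch (not part of the original file): estimators resolve their
# `loss` string through this registry. A minimal lookup, assuming the built
# Cython extensions are available:
#
#     loss = _LOSSES["poisson_loss"](sample_weight=None)
#     loss.loss(y_true=y, raw_prediction=raw)  # see the tests below for usage
# ---------------------------------------------------------------------------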
env-llmeval/lib/python3.10/site-packages/sklearn/_loss/tests/__init__.py
ADDED
File without changes

env-llmeval/lib/python3.10/site-packages/sklearn/_loss/tests/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (184 Bytes).

env-llmeval/lib/python3.10/site-packages/sklearn/_loss/tests/__pycache__/test_link.cpython-310.pyc
ADDED
Binary file (2.66 kB).

env-llmeval/lib/python3.10/site-packages/sklearn/_loss/tests/__pycache__/test_loss.cpython-310.pyc
ADDED
Binary file (27 kB).

env-llmeval/lib/python3.10/site-packages/sklearn/_loss/tests/test_link.py
ADDED
@@ -0,0 +1,111 @@
import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_array_equal

from sklearn._loss.link import (
    _LINKS,
    HalfLogitLink,
    Interval,
    MultinomialLogit,
    _inclusive_low_high,
)

LINK_FUNCTIONS = list(_LINKS.values())


def test_interval_raises():
    """Test that interval with low > high raises ValueError."""
    with pytest.raises(
        ValueError, match="One must have low <= high; got low=1, high=0."
    ):
        Interval(1, 0, False, False)


@pytest.mark.parametrize(
    "interval",
    [
        Interval(0, 1, False, False),
        Interval(0, 1, False, True),
        Interval(0, 1, True, False),
        Interval(0, 1, True, True),
        Interval(-np.inf, np.inf, False, False),
        Interval(-np.inf, np.inf, False, True),
        Interval(-np.inf, np.inf, True, False),
        Interval(-np.inf, np.inf, True, True),
        Interval(-10, -1, False, False),
        Interval(-10, -1, False, True),
        Interval(-10, -1, True, False),
        Interval(-10, -1, True, True),
    ],
)
def test_is_in_range(interval):
    # make sure low and high are always within the interval, used for linspace
    low, high = _inclusive_low_high(interval)

    x = np.linspace(low, high, num=10)
    assert interval.includes(x)

    # x contains lower bound
    assert interval.includes(np.r_[x, interval.low]) == interval.low_inclusive

    # x contains upper bound
    assert interval.includes(np.r_[x, interval.high]) == interval.high_inclusive

    # x contains upper and lower bound
    assert interval.includes(np.r_[x, interval.low, interval.high]) == (
        interval.low_inclusive and interval.high_inclusive
    )


@pytest.mark.parametrize("link", LINK_FUNCTIONS)
def test_link_inverse_identity(link, global_random_seed):
    # Test that link of inverse gives identity.
    rng = np.random.RandomState(global_random_seed)
    link = link()
    n_samples, n_classes = 100, None
    # The values for `raw_prediction` are limited from -20 to 20 because in the
    # class `LogitLink` the term `expit(x)` comes very close to 1 for large
    # positive x and therefore loses precision.
    if link.is_multiclass:
        n_classes = 10
        raw_prediction = rng.uniform(low=-20, high=20, size=(n_samples, n_classes))
        if isinstance(link, MultinomialLogit):
            raw_prediction = link.symmetrize_raw_prediction(raw_prediction)
    elif isinstance(link, HalfLogitLink):
        raw_prediction = rng.uniform(low=-10, high=10, size=(n_samples))
    else:
        raw_prediction = rng.uniform(low=-20, high=20, size=(n_samples))

    assert_allclose(link.link(link.inverse(raw_prediction)), raw_prediction)
    y_pred = link.inverse(raw_prediction)
    assert_allclose(link.inverse(link.link(y_pred)), y_pred)


@pytest.mark.parametrize("link", LINK_FUNCTIONS)
def test_link_out_argument(link):
    # Test that out argument gets assigned the result.
    rng = np.random.RandomState(42)
    link = link()
    n_samples, n_classes = 100, None
    if link.is_multiclass:
        n_classes = 10
        raw_prediction = rng.normal(loc=0, scale=10, size=(n_samples, n_classes))
        if isinstance(link, MultinomialLogit):
            raw_prediction = link.symmetrize_raw_prediction(raw_prediction)
    else:
        # So far, the valid interval of raw_prediction is (-inf, inf) and
        # we do not need to distinguish.
        raw_prediction = rng.uniform(low=-10, high=10, size=(n_samples))

    y_pred = link.inverse(raw_prediction, out=None)
    out = np.empty_like(raw_prediction)
    y_pred_2 = link.inverse(raw_prediction, out=out)
    assert_allclose(y_pred, out)
    assert_array_equal(out, y_pred_2)
    assert np.shares_memory(out, y_pred_2)

    out = np.empty_like(y_pred)
    raw_prediction_2 = link.link(y_pred, out=out)
    assert_allclose(raw_prediction, out)
    assert_array_equal(out, raw_prediction_2)
    assert np.shares_memory(out, raw_prediction_2)
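# ---------------------------------------------------------------------------
# Editorial sketch (not part of the original file): the Interval signature
# used throughout these tests is (low, high, low_inclusive, high_inclusive),
# and includes() accepts arrays:
#
#     iv = Interval(0, 1, True, False)   # [0, 1)
#     iv.includes(np.array([0.0, 0.5]))  # True
#     iv.includes(np.array([1.0]))       # False, high bound excluded
# ---------------------------------------------------------------------------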
env-llmeval/lib/python3.10/site-packages/sklearn/_loss/tests/test_loss.py
ADDED
@@ -0,0 +1,1320 @@
import pickle

import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_array_equal
from pytest import approx
from scipy.optimize import (
    LinearConstraint,
    minimize,
    minimize_scalar,
    newton,
)
from scipy.special import logsumexp

from sklearn._loss.link import IdentityLink, _inclusive_low_high
from sklearn._loss.loss import (
    _LOSSES,
    AbsoluteError,
    BaseLoss,
    HalfBinomialLoss,
    HalfGammaLoss,
    HalfMultinomialLoss,
    HalfPoissonLoss,
    HalfSquaredError,
    HalfTweedieLoss,
    HalfTweedieLossIdentity,
    HuberLoss,
    PinballLoss,
)
from sklearn.utils import _IS_WASM, assert_all_finite
from sklearn.utils._testing import create_memmap_backed_data, skip_if_32bit

ALL_LOSSES = list(_LOSSES.values())

LOSS_INSTANCES = [loss() for loss in ALL_LOSSES]
# HalfTweedieLoss(power=1.5) is already there as default
LOSS_INSTANCES += [
    PinballLoss(quantile=0.25),
    HuberLoss(quantile=0.75),
    HalfTweedieLoss(power=-1.5),
    HalfTweedieLoss(power=0),
    HalfTweedieLoss(power=1),
    HalfTweedieLoss(power=2),
    HalfTweedieLoss(power=3.0),
    HalfTweedieLossIdentity(power=0),
    HalfTweedieLossIdentity(power=1),
    HalfTweedieLossIdentity(power=2),
    HalfTweedieLossIdentity(power=3.0),
]


def loss_instance_name(param):
    if isinstance(param, BaseLoss):
        loss = param
        name = loss.__class__.__name__
        if isinstance(loss, PinballLoss):
            name += f"(quantile={loss.closs.quantile})"
        elif isinstance(loss, HuberLoss):
            name += f"(quantile={loss.quantile})"
        elif hasattr(loss, "closs") and hasattr(loss.closs, "power"):
            name += f"(power={loss.closs.power})"
        return name
    else:
        return str(param)


def random_y_true_raw_prediction(
    loss, n_samples, y_bound=(-100, 100), raw_bound=(-5, 5), seed=42
):
    """Randomly generate y_true and raw_prediction in valid range."""
    rng = np.random.RandomState(seed)
    if loss.is_multiclass:
        raw_prediction = np.empty((n_samples, loss.n_classes))
        raw_prediction.flat[:] = rng.uniform(
            low=raw_bound[0],
            high=raw_bound[1],
            size=n_samples * loss.n_classes,
        )
        y_true = np.arange(n_samples).astype(float) % loss.n_classes
    else:
        # If link is identity, we must respect the interval of y_pred:
        if isinstance(loss.link, IdentityLink):
            low, high = _inclusive_low_high(loss.interval_y_pred)
            low = np.amax([low, raw_bound[0]])
            high = np.amin([high, raw_bound[1]])
            raw_bound = (low, high)
        raw_prediction = rng.uniform(
            low=raw_bound[0], high=raw_bound[1], size=n_samples
        )
        # generate a y_true in valid range
        low, high = _inclusive_low_high(loss.interval_y_true)
        low = max(low, y_bound[0])
        high = min(high, y_bound[1])
        y_true = rng.uniform(low, high, size=n_samples)
        # set some values at special boundaries
        if loss.interval_y_true.low == 0 and loss.interval_y_true.low_inclusive:
            y_true[:: (n_samples // 3)] = 0
        if loss.interval_y_true.high == 1 and loss.interval_y_true.high_inclusive:
            y_true[1 :: (n_samples // 3)] = 1

    return y_true, raw_prediction


def numerical_derivative(func, x, eps):
    """Helper function for numerical (first) derivatives."""
    # For numerical derivatives, see
    # https://en.wikipedia.org/wiki/Numerical_differentiation
    # https://en.wikipedia.org/wiki/Finite_difference_coefficient
    # We use central finite differences of accuracy 4.
    h = np.full_like(x, fill_value=eps)
    f_minus_2h = func(x - 2 * h)
    f_minus_1h = func(x - h)
    f_plus_1h = func(x + h)
    f_plus_2h = func(x + 2 * h)
    return (-f_plus_2h + 8 * f_plus_1h - 8 * f_minus_1h + f_minus_2h) / (12.0 * eps)

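# ---------------------------------------------------------------------------
# Editorial sketch (not part of the original file): the stencil above is the
# standard fourth-order central difference, so it reproduces known
# derivatives almost to machine precision. Hypothetical self-check:
def _demo_numerical_derivative_check():
    # d/dx sin(x) = cos(x); the error is on the order of 1e-12, far below
    # the rtol=5e-6 tolerance used in the numerical tests further below.
    x = np.linspace(-1.0, 1.0, 5)
    return np.max(np.abs(numerical_derivative(np.sin, x, eps=1e-3) - np.cos(x)))
# ---------------------------------------------------------------------------
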
@pytest.mark.parametrize("loss", LOSS_INSTANCES, ids=loss_instance_name)
def test_loss_boundary(loss):
    """Test interval ranges of y_true and y_pred in losses."""
    # make sure low and high are always within the interval, used for linspace
    if loss.is_multiclass:
        y_true = np.linspace(0, 9, num=10)
    else:
        low, high = _inclusive_low_high(loss.interval_y_true)
        y_true = np.linspace(low, high, num=10)

    # add boundaries if they are included
    if loss.interval_y_true.low_inclusive:
        y_true = np.r_[y_true, loss.interval_y_true.low]
    if loss.interval_y_true.high_inclusive:
        y_true = np.r_[y_true, loss.interval_y_true.high]

    assert loss.in_y_true_range(y_true)

    n = y_true.shape[0]
    low, high = _inclusive_low_high(loss.interval_y_pred)
    if loss.is_multiclass:
        y_pred = np.empty((n, 3))
        y_pred[:, 0] = np.linspace(low, high, num=n)
        y_pred[:, 1] = 0.5 * (1 - y_pred[:, 0])
        y_pred[:, 2] = 0.5 * (1 - y_pred[:, 0])
    else:
        y_pred = np.linspace(low, high, num=n)

    assert loss.in_y_pred_range(y_pred)

    # calculating losses should not fail
    raw_prediction = loss.link.link(y_pred)
    loss.loss(y_true=y_true, raw_prediction=raw_prediction)


# Fixture to test valid value ranges.
Y_COMMON_PARAMS = [
    # (loss, [y success], [y fail])
    (HalfSquaredError(), [-100, 0, 0.1, 100], [-np.inf, np.inf]),
    (AbsoluteError(), [-100, 0, 0.1, 100], [-np.inf, np.inf]),
    (PinballLoss(), [-100, 0, 0.1, 100], [-np.inf, np.inf]),
    (HuberLoss(), [-100, 0, 0.1, 100], [-np.inf, np.inf]),
    (HalfPoissonLoss(), [0.1, 100], [-np.inf, -3, -0.1, np.inf]),
    (HalfGammaLoss(), [0.1, 100], [-np.inf, -3, -0.1, 0, np.inf]),
    (HalfTweedieLoss(power=-3), [0.1, 100], [-np.inf, np.inf]),
    (HalfTweedieLoss(power=0), [0.1, 100], [-np.inf, np.inf]),
    (HalfTweedieLoss(power=1.5), [0.1, 100], [-np.inf, -3, -0.1, np.inf]),
    (HalfTweedieLoss(power=2), [0.1, 100], [-np.inf, -3, -0.1, 0, np.inf]),
    (HalfTweedieLoss(power=3), [0.1, 100], [-np.inf, -3, -0.1, 0, np.inf]),
    (HalfTweedieLossIdentity(power=-3), [0.1, 100], [-np.inf, np.inf]),
    (HalfTweedieLossIdentity(power=0), [-3, -0.1, 0, 0.1, 100], [-np.inf, np.inf]),
    (HalfTweedieLossIdentity(power=1.5), [0.1, 100], [-np.inf, -3, -0.1, np.inf]),
    (HalfTweedieLossIdentity(power=2), [0.1, 100], [-np.inf, -3, -0.1, 0, np.inf]),
    (HalfTweedieLossIdentity(power=3), [0.1, 100], [-np.inf, -3, -0.1, 0, np.inf]),
    (HalfBinomialLoss(), [0.1, 0.5, 0.9], [-np.inf, -1, 2, np.inf]),
    (HalfMultinomialLoss(), [], [-np.inf, -1, 1.1, np.inf]),
]
# y_pred and y_true do not always have the same domain (valid value range).
# Hence, we define extra sets of parameters for each of them.
Y_TRUE_PARAMS = [  # type: ignore
    # (loss, [y success], [y fail])
    (HalfPoissonLoss(), [0], []),
    (HuberLoss(), [0], []),
    (HalfTweedieLoss(power=-3), [-100, -0.1, 0], []),
    (HalfTweedieLoss(power=0), [-100, 0], []),
    (HalfTweedieLoss(power=1.5), [0], []),
    (HalfTweedieLossIdentity(power=-3), [-100, -0.1, 0], []),
    (HalfTweedieLossIdentity(power=0), [-100, 0], []),
    (HalfTweedieLossIdentity(power=1.5), [0], []),
    (HalfBinomialLoss(), [0, 1], []),
    (HalfMultinomialLoss(), [0.0, 1.0, 2], []),
]
Y_PRED_PARAMS = [
    # (loss, [y success], [y fail])
    (HalfPoissonLoss(), [], [0]),
    (HalfTweedieLoss(power=-3), [], [-3, -0.1, 0]),
    (HalfTweedieLoss(power=0), [], [-3, -0.1, 0]),
    (HalfTweedieLoss(power=1.5), [], [0]),
    (HalfTweedieLossIdentity(power=-3), [], [-3, -0.1, 0]),
    (HalfTweedieLossIdentity(power=0), [-3, -0.1, 0], []),
    (HalfTweedieLossIdentity(power=1.5), [], [0]),
    (HalfBinomialLoss(), [], [0, 1]),
    (HalfMultinomialLoss(), [0.1, 0.5], [0, 1]),
]


@pytest.mark.parametrize(
    "loss, y_true_success, y_true_fail", Y_COMMON_PARAMS + Y_TRUE_PARAMS
)
def test_loss_boundary_y_true(loss, y_true_success, y_true_fail):
    """Test boundaries of y_true for loss functions."""
    for y in y_true_success:
        assert loss.in_y_true_range(np.array([y]))
    for y in y_true_fail:
        assert not loss.in_y_true_range(np.array([y]))


@pytest.mark.parametrize(
    "loss, y_pred_success, y_pred_fail", Y_COMMON_PARAMS + Y_PRED_PARAMS  # type: ignore
)
def test_loss_boundary_y_pred(loss, y_pred_success, y_pred_fail):
    """Test boundaries of y_pred for loss functions."""
    for y in y_pred_success:
        assert loss.in_y_pred_range(np.array([y]))
    for y in y_pred_fail:
        assert not loss.in_y_pred_range(np.array([y]))


@pytest.mark.parametrize(
    "loss, y_true, raw_prediction, loss_true, gradient_true, hessian_true",
    [
        (HalfSquaredError(), 1.0, 5.0, 8, 4, 1),
        (AbsoluteError(), 1.0, 5.0, 4.0, 1.0, None),
        (PinballLoss(quantile=0.5), 1.0, 5.0, 2, 0.5, None),
        (PinballLoss(quantile=0.25), 1.0, 5.0, 4 * (1 - 0.25), 1 - 0.25, None),
        (PinballLoss(quantile=0.25), 5.0, 1.0, 4 * 0.25, -0.25, None),
        (HuberLoss(quantile=0.5, delta=3), 1.0, 5.0, 3 * (4 - 3 / 2), None, None),
        (HuberLoss(quantile=0.5, delta=3), 1.0, 3.0, 0.5 * 2**2, None, None),
        (HalfPoissonLoss(), 2.0, np.log(4), 4 - 2 * np.log(4), 4 - 2, 4),
        (HalfGammaLoss(), 2.0, np.log(4), np.log(4) + 2 / 4, 1 - 2 / 4, 2 / 4),
        (HalfTweedieLoss(power=3), 2.0, np.log(4), -1 / 4 + 1 / 4**2, None, None),
        (HalfTweedieLossIdentity(power=1), 2.0, 4.0, 2 - 2 * np.log(2), None, None),
        (HalfTweedieLossIdentity(power=2), 2.0, 4.0, np.log(2) - 1 / 2, None, None),
        (
            HalfTweedieLossIdentity(power=3),
            2.0,
            4.0,
            -1 / 4 + 1 / 4**2 + 1 / 2 / 2,
            None,
            None,
        ),
        (
            HalfBinomialLoss(),
            0.25,
            np.log(4),
            np.log1p(4) - 0.25 * np.log(4),
            None,
            None,
        ),
        # Extreme log loss cases, checked with mpmath:
        # import mpmath as mp
        #
        # # Stolen from scipy
        # def mpf2float(x):
        #     return float(mp.nstr(x, 17, min_fixed=0, max_fixed=0))
        #
        # def mp_logloss(y_true, raw):
        #     with mp.workdps(100):
        #         y_true, raw = mp.mpf(float(y_true)), mp.mpf(float(raw))
        #         out = mp.log1p(mp.exp(raw)) - y_true * raw
        #     return mpf2float(out)
        #
        # def mp_gradient(y_true, raw):
        #     with mp.workdps(100):
        #         y_true, raw = mp.mpf(float(y_true)), mp.mpf(float(raw))
        #         out = mp.mpf(1) / (mp.mpf(1) + mp.exp(-raw)) - y_true
        #     return mpf2float(out)
        #
        # def mp_hessian(y_true, raw):
        #     with mp.workdps(100):
        #         y_true, raw = mp.mpf(float(y_true)), mp.mpf(float(raw))
        #         p = mp.mpf(1) / (mp.mpf(1) + mp.exp(-raw))
        #         out = p * (mp.mpf(1) - p)
        #     return mpf2float(out)
        #
        # y, raw = 0.0, 37.
        # mp_logloss(y, raw), mp_gradient(y, raw), mp_hessian(y, raw)
        (HalfBinomialLoss(), 0.0, -1e20, 0, 0, 0),
        (HalfBinomialLoss(), 1.0, -1e20, 1e20, -1, 0),
        (HalfBinomialLoss(), 0.0, -1e3, 0, 0, 0),
        (HalfBinomialLoss(), 1.0, -1e3, 1e3, -1, 0),
        (HalfBinomialLoss(), 1.0, -37.5, 37.5, -1, 0),
        (HalfBinomialLoss(), 1.0, -37.0, 37, 1e-16 - 1, 8.533047625744065e-17),
        (HalfBinomialLoss(), 0.0, -37.0, *[8.533047625744065e-17] * 3),
        (HalfBinomialLoss(), 1.0, -36.9, 36.9, 1e-16 - 1, 9.430476078526806e-17),
        (HalfBinomialLoss(), 0.0, -36.9, *[9.430476078526806e-17] * 3),
        (HalfBinomialLoss(), 0.0, 37.0, 37, 1 - 1e-16, 8.533047625744065e-17),
        (HalfBinomialLoss(), 1.0, 37.0, *[8.533047625744066e-17] * 3),
        (HalfBinomialLoss(), 0.0, 37.5, 37.5, 1, 5.175555005801868e-17),
        (HalfBinomialLoss(), 0.0, 232.8, 232.8, 1, 1.4287342391028437e-101),
        (HalfBinomialLoss(), 1.0, 1e20, 0, 0, 0),
        (HalfBinomialLoss(), 0.0, 1e20, 1e20, 1, 0),
        (
            HalfBinomialLoss(),
            1.0,
            232.8,
            0,
            -1.4287342391028437e-101,
            1.4287342391028437e-101,
        ),
        (HalfBinomialLoss(), 1.0, 232.9, 0, 0, 0),
        (HalfBinomialLoss(), 1.0, 1e3, 0, 0, 0),
        (HalfBinomialLoss(), 0.0, 1e3, 1e3, 1, 0),
        (
            HalfMultinomialLoss(n_classes=3),
            0.0,
            [0.2, 0.5, 0.3],
            logsumexp([0.2, 0.5, 0.3]) - 0.2,
            None,
            None,
        ),
        (
            HalfMultinomialLoss(n_classes=3),
            1.0,
            [0.2, 0.5, 0.3],
            logsumexp([0.2, 0.5, 0.3]) - 0.5,
            None,
            None,
        ),
        (
            HalfMultinomialLoss(n_classes=3),
            2.0,
            [0.2, 0.5, 0.3],
            logsumexp([0.2, 0.5, 0.3]) - 0.3,
            None,
            None,
        ),
        (
            HalfMultinomialLoss(n_classes=3),
            2.0,
            [1e4, 0, 7e-7],
            logsumexp([1e4, 0, 7e-7]) - (7e-7),
            None,
            None,
        ),
    ],
    ids=loss_instance_name,
)
def test_loss_on_specific_values(
    loss, y_true, raw_prediction, loss_true, gradient_true, hessian_true
):
    """Test losses, gradients and hessians at specific values."""
    loss1 = loss(y_true=np.array([y_true]), raw_prediction=np.array([raw_prediction]))
    grad1 = loss.gradient(
        y_true=np.array([y_true]), raw_prediction=np.array([raw_prediction])
    )
    loss2, grad2 = loss.loss_gradient(
        y_true=np.array([y_true]), raw_prediction=np.array([raw_prediction])
    )
    grad3, hess = loss.gradient_hessian(
        y_true=np.array([y_true]), raw_prediction=np.array([raw_prediction])
    )

    assert loss1 == approx(loss_true, rel=1e-15, abs=1e-15)
    assert loss2 == approx(loss_true, rel=1e-15, abs=1e-15)

    if gradient_true is not None:
        assert grad1 == approx(gradient_true, rel=1e-15, abs=1e-15)
        assert grad2 == approx(gradient_true, rel=1e-15, abs=1e-15)
        assert grad3 == approx(gradient_true, rel=1e-15, abs=1e-15)

    if hessian_true is not None:
        assert hess == approx(hessian_true, rel=1e-15, abs=1e-15)

@pytest.mark.parametrize("loss", ALL_LOSSES)
@pytest.mark.parametrize("readonly_memmap", [False, True])
@pytest.mark.parametrize("dtype_in", [np.float32, np.float64])
@pytest.mark.parametrize("dtype_out", [np.float32, np.float64])
@pytest.mark.parametrize("sample_weight", [None, 1])
@pytest.mark.parametrize("out1", [None, 1])
@pytest.mark.parametrize("out2", [None, 1])
@pytest.mark.parametrize("n_threads", [1, 2])
def test_loss_dtype(
    loss, readonly_memmap, dtype_in, dtype_out, sample_weight, out1, out2, n_threads
):
    """Test acceptance of dtypes, readonly and writeable arrays in loss functions.

    Check that loss accepts if all input arrays are either all float32 or all
    float64, and all output arrays are either all float32 or all float64.

    Also check that input arrays can be readonly, e.g. memory mapped.
    """
    if _IS_WASM and readonly_memmap:  # pragma: nocover
        pytest.xfail(reason="memmap not fully supported")

    loss = loss()
    # generate a y_true and raw_prediction in valid range
    n_samples = 5
    y_true, raw_prediction = random_y_true_raw_prediction(
        loss=loss,
        n_samples=n_samples,
        y_bound=(-100, 100),
        raw_bound=(-10, 10),
        seed=42,
    )
    y_true = y_true.astype(dtype_in)
    raw_prediction = raw_prediction.astype(dtype_in)

    if sample_weight is not None:
        sample_weight = np.array([2.0] * n_samples, dtype=dtype_in)
    if out1 is not None:
        out1 = np.empty_like(y_true, dtype=dtype_out)
    if out2 is not None:
        out2 = np.empty_like(raw_prediction, dtype=dtype_out)

    if readonly_memmap:
        y_true = create_memmap_backed_data(y_true)
        raw_prediction = create_memmap_backed_data(raw_prediction)
        if sample_weight is not None:
            sample_weight = create_memmap_backed_data(sample_weight)

    loss.loss(
        y_true=y_true,
        raw_prediction=raw_prediction,
        sample_weight=sample_weight,
        loss_out=out1,
        n_threads=n_threads,
    )
    loss.gradient(
        y_true=y_true,
        raw_prediction=raw_prediction,
        sample_weight=sample_weight,
        gradient_out=out2,
        n_threads=n_threads,
    )
    loss.loss_gradient(
        y_true=y_true,
        raw_prediction=raw_prediction,
        sample_weight=sample_weight,
        loss_out=out1,
        gradient_out=out2,
        n_threads=n_threads,
    )
    if out1 is not None and loss.is_multiclass:
        out1 = np.empty_like(raw_prediction, dtype=dtype_out)
    loss.gradient_hessian(
        y_true=y_true,
        raw_prediction=raw_prediction,
        sample_weight=sample_weight,
        gradient_out=out1,
        hessian_out=out2,
        n_threads=n_threads,
    )
    loss(y_true=y_true, raw_prediction=raw_prediction, sample_weight=sample_weight)
    loss.fit_intercept_only(y_true=y_true, sample_weight=sample_weight)
    loss.constant_to_optimal_zero(y_true=y_true, sample_weight=sample_weight)
    if hasattr(loss, "predict_proba"):
        loss.predict_proba(raw_prediction=raw_prediction)
    if hasattr(loss, "gradient_proba"):
        loss.gradient_proba(
            y_true=y_true,
            raw_prediction=raw_prediction,
            sample_weight=sample_weight,
            gradient_out=out1,
            proba_out=out2,
            n_threads=n_threads,
        )


@pytest.mark.parametrize("loss", LOSS_INSTANCES, ids=loss_instance_name)
@pytest.mark.parametrize("sample_weight", [None, "range"])
def test_loss_same_as_C_functions(loss, sample_weight):
    """Test that Python and Cython functions return same results."""
    y_true, raw_prediction = random_y_true_raw_prediction(
        loss=loss,
        n_samples=20,
        y_bound=(-100, 100),
        raw_bound=(-10, 10),
        seed=42,
    )
    if sample_weight == "range":
        sample_weight = np.linspace(1, y_true.shape[0], num=y_true.shape[0])

    out_l1 = np.empty_like(y_true)
    out_l2 = np.empty_like(y_true)
    out_g1 = np.empty_like(raw_prediction)
    out_g2 = np.empty_like(raw_prediction)
    out_h1 = np.empty_like(raw_prediction)
    out_h2 = np.empty_like(raw_prediction)
    loss.loss(
        y_true=y_true,
        raw_prediction=raw_prediction,
        sample_weight=sample_weight,
        loss_out=out_l1,
    )
    loss.closs.loss(
        y_true=y_true,
        raw_prediction=raw_prediction,
        sample_weight=sample_weight,
        loss_out=out_l2,
    )
    assert_allclose(out_l1, out_l2)
    loss.gradient(
        y_true=y_true,
        raw_prediction=raw_prediction,
        sample_weight=sample_weight,
        gradient_out=out_g1,
    )
    loss.closs.gradient(
        y_true=y_true,
        raw_prediction=raw_prediction,
        sample_weight=sample_weight,
        gradient_out=out_g2,
    )
    assert_allclose(out_g1, out_g2)
    loss.loss_gradient(
        y_true=y_true,
        raw_prediction=raw_prediction,
        sample_weight=sample_weight,
        loss_out=out_l1,
        gradient_out=out_g1,
    )
    loss.closs.loss_gradient(
        y_true=y_true,
        raw_prediction=raw_prediction,
        sample_weight=sample_weight,
        loss_out=out_l2,
        gradient_out=out_g2,
    )
    assert_allclose(out_l1, out_l2)
    assert_allclose(out_g1, out_g2)
    loss.gradient_hessian(
        y_true=y_true,
        raw_prediction=raw_prediction,
        sample_weight=sample_weight,
        gradient_out=out_g1,
        hessian_out=out_h1,
    )
    loss.closs.gradient_hessian(
        y_true=y_true,
        raw_prediction=raw_prediction,
        sample_weight=sample_weight,
        gradient_out=out_g2,
        hessian_out=out_h2,
    )
    assert_allclose(out_g1, out_g2)
    assert_allclose(out_h1, out_h2)

|
549 |
+
@pytest.mark.parametrize("sample_weight", [None, "range"])
|
550 |
+
def test_loss_gradients_are_the_same(loss, sample_weight, global_random_seed):
|
551 |
+
"""Test that loss and gradient are the same across different functions.
|
552 |
+
|
553 |
+
Also test that output arguments contain correct results.
|
554 |
+
"""
|
555 |
+
y_true, raw_prediction = random_y_true_raw_prediction(
|
556 |
+
loss=loss,
|
557 |
+
n_samples=20,
|
558 |
+
y_bound=(-100, 100),
|
559 |
+
raw_bound=(-10, 10),
|
560 |
+
seed=global_random_seed,
|
561 |
+
)
|
562 |
+
if sample_weight == "range":
|
563 |
+
sample_weight = np.linspace(1, y_true.shape[0], num=y_true.shape[0])
|
564 |
+
|
565 |
+
out_l1 = np.empty_like(y_true)
|
566 |
+
out_l2 = np.empty_like(y_true)
|
567 |
+
out_g1 = np.empty_like(raw_prediction)
|
568 |
+
out_g2 = np.empty_like(raw_prediction)
|
569 |
+
out_g3 = np.empty_like(raw_prediction)
|
570 |
+
out_h3 = np.empty_like(raw_prediction)
|
571 |
+
|
572 |
+
l1 = loss.loss(
|
573 |
+
y_true=y_true,
|
574 |
+
raw_prediction=raw_prediction,
|
575 |
+
sample_weight=sample_weight,
|
576 |
+
loss_out=out_l1,
|
577 |
+
)
|
578 |
+
g1 = loss.gradient(
|
579 |
+
y_true=y_true,
|
580 |
+
raw_prediction=raw_prediction,
|
581 |
+
sample_weight=sample_weight,
|
582 |
+
gradient_out=out_g1,
|
583 |
+
)
|
584 |
+
l2, g2 = loss.loss_gradient(
|
585 |
+
y_true=y_true,
|
586 |
+
raw_prediction=raw_prediction,
|
587 |
+
sample_weight=sample_weight,
|
588 |
+
loss_out=out_l2,
|
589 |
+
gradient_out=out_g2,
|
590 |
+
)
|
591 |
+
g3, h3 = loss.gradient_hessian(
|
592 |
+
y_true=y_true,
|
593 |
+
raw_prediction=raw_prediction,
|
594 |
+
sample_weight=sample_weight,
|
595 |
+
gradient_out=out_g3,
|
596 |
+
hessian_out=out_h3,
|
597 |
+
)
|
598 |
+
assert_allclose(l1, l2)
|
599 |
+
assert_array_equal(l1, out_l1)
|
600 |
+
assert np.shares_memory(l1, out_l1)
|
601 |
+
assert_array_equal(l2, out_l2)
|
602 |
+
assert np.shares_memory(l2, out_l2)
|
603 |
+
assert_allclose(g1, g2)
|
604 |
+
assert_allclose(g1, g3)
|
605 |
+
assert_array_equal(g1, out_g1)
|
606 |
+
assert np.shares_memory(g1, out_g1)
|
607 |
+
assert_array_equal(g2, out_g2)
|
608 |
+
assert np.shares_memory(g2, out_g2)
|
609 |
+
assert_array_equal(g3, out_g3)
|
610 |
+
assert np.shares_memory(g3, out_g3)
|
611 |
+
|
612 |
+
if hasattr(loss, "gradient_proba"):
|
613 |
+
assert loss.is_multiclass # only for HalfMultinomialLoss
|
614 |
+
out_g4 = np.empty_like(raw_prediction)
|
615 |
+
out_proba = np.empty_like(raw_prediction)
|
616 |
+
g4, proba = loss.gradient_proba(
|
617 |
+
y_true=y_true,
|
618 |
+
raw_prediction=raw_prediction,
|
619 |
+
sample_weight=sample_weight,
|
620 |
+
gradient_out=out_g4,
|
621 |
+
proba_out=out_proba,
|
622 |
+
)
|
623 |
+
assert_allclose(g1, out_g4)
|
624 |
+
assert_allclose(g1, g4)
|
625 |
+
assert_allclose(proba, out_proba)
|
626 |
+
assert_allclose(np.sum(proba, axis=1), 1, rtol=1e-11)
|
627 |
+
|
628 |
+
|
629 |
+
@pytest.mark.parametrize("loss", LOSS_INSTANCES, ids=loss_instance_name)
|
630 |
+
@pytest.mark.parametrize("sample_weight", ["ones", "random"])
|
631 |
+
def test_sample_weight_multiplies(loss, sample_weight, global_random_seed):
|
632 |
+
"""Test sample weights in loss, gradients and hessians.
|
633 |
+
|
634 |
+
Make sure that passing sample weights to loss, gradient and hessian
|
635 |
+
computation methods is equivalent to multiplying by the weights.
|
636 |
+
"""
|
637 |
+
n_samples = 100
|
638 |
+
y_true, raw_prediction = random_y_true_raw_prediction(
|
639 |
+
loss=loss,
|
640 |
+
n_samples=n_samples,
|
641 |
+
y_bound=(-100, 100),
|
642 |
+
raw_bound=(-5, 5),
|
643 |
+
seed=global_random_seed,
|
644 |
+
)
|
645 |
+
|
646 |
+
if sample_weight == "ones":
|
647 |
+
sample_weight = np.ones(shape=n_samples, dtype=np.float64)
|
648 |
+
else:
|
649 |
+
rng = np.random.RandomState(global_random_seed)
|
650 |
+
sample_weight = rng.normal(size=n_samples).astype(np.float64)
|
651 |
+
|
652 |
+
assert_allclose(
|
653 |
+
loss.loss(
|
654 |
+
y_true=y_true,
|
655 |
+
raw_prediction=raw_prediction,
|
656 |
+
sample_weight=sample_weight,
|
657 |
+
),
|
658 |
+
sample_weight
|
659 |
+
* loss.loss(
|
660 |
+
y_true=y_true,
|
661 |
+
raw_prediction=raw_prediction,
|
662 |
+
sample_weight=None,
|
663 |
+
),
|
664 |
+
)
|
665 |
+
|
666 |
+
losses, gradient = loss.loss_gradient(
|
667 |
+
y_true=y_true,
|
668 |
+
raw_prediction=raw_prediction,
|
669 |
+
sample_weight=None,
|
670 |
+
)
|
671 |
+
losses_sw, gradient_sw = loss.loss_gradient(
|
672 |
+
y_true=y_true,
|
673 |
+
raw_prediction=raw_prediction,
|
674 |
+
sample_weight=sample_weight,
|
675 |
+
)
|
676 |
+
assert_allclose(losses * sample_weight, losses_sw)
|
677 |
+
if not loss.is_multiclass:
|
678 |
+
assert_allclose(gradient * sample_weight, gradient_sw)
|
679 |
+
else:
|
680 |
+
assert_allclose(gradient * sample_weight[:, None], gradient_sw)
|
681 |
+
|
682 |
+
gradient, hessian = loss.gradient_hessian(
|
683 |
+
y_true=y_true,
|
684 |
+
raw_prediction=raw_prediction,
|
685 |
+
sample_weight=None,
|
686 |
+
)
|
687 |
+
gradient_sw, hessian_sw = loss.gradient_hessian(
|
688 |
+
y_true=y_true,
|
689 |
+
raw_prediction=raw_prediction,
|
690 |
+
sample_weight=sample_weight,
|
691 |
+
)
|
692 |
+
if not loss.is_multiclass:
|
693 |
+
assert_allclose(gradient * sample_weight, gradient_sw)
|
694 |
+
assert_allclose(hessian * sample_weight, hessian_sw)
|
695 |
+
else:
|
696 |
+
assert_allclose(gradient * sample_weight[:, None], gradient_sw)
|
697 |
+
assert_allclose(hessian * sample_weight[:, None], hessian_sw)
|
698 |
+
|
699 |
+
|
700 |
+
@pytest.mark.parametrize("loss", LOSS_INSTANCES, ids=loss_instance_name)
def test_graceful_squeezing(loss):
    """Test that reshaped raw_prediction gives same results."""
    y_true, raw_prediction = random_y_true_raw_prediction(
        loss=loss,
        n_samples=20,
        y_bound=(-100, 100),
        raw_bound=(-10, 10),
        seed=42,
    )

    if raw_prediction.ndim == 1:
        raw_prediction_2d = raw_prediction[:, None]
        assert_allclose(
            loss.loss(y_true=y_true, raw_prediction=raw_prediction_2d),
            loss.loss(y_true=y_true, raw_prediction=raw_prediction),
        )
        assert_allclose(
            loss.loss_gradient(y_true=y_true, raw_prediction=raw_prediction_2d),
            loss.loss_gradient(y_true=y_true, raw_prediction=raw_prediction),
        )
        assert_allclose(
            loss.gradient(y_true=y_true, raw_prediction=raw_prediction_2d),
            loss.gradient(y_true=y_true, raw_prediction=raw_prediction),
        )
        assert_allclose(
            loss.gradient_hessian(y_true=y_true, raw_prediction=raw_prediction_2d),
            loss.gradient_hessian(y_true=y_true, raw_prediction=raw_prediction),
        )

@pytest.mark.parametrize("loss", LOSS_INSTANCES, ids=loss_instance_name)
@pytest.mark.parametrize("sample_weight", [None, "range"])
def test_loss_of_perfect_prediction(loss, sample_weight):
    """Test value of perfect predictions.

    Loss of y_pred = y_true plus constant_to_optimal_zero should sum up to
    zero.
    """
    if not loss.is_multiclass:
        # Use small values such that exp(value) is not nan.
        raw_prediction = np.array([-10, -0.1, 0, 0.1, 3, 10])
        # If link is identity, we must respect the interval of y_pred:
        if isinstance(loss.link, IdentityLink):
            eps = 1e-10
            low = loss.interval_y_pred.low
            if not loss.interval_y_pred.low_inclusive:
                low = low + eps
            high = loss.interval_y_pred.high
            if not loss.interval_y_pred.high_inclusive:
                high = high - eps
            raw_prediction = np.clip(raw_prediction, low, high)
        y_true = loss.link.inverse(raw_prediction)
    else:
        # HalfMultinomialLoss
        y_true = np.arange(loss.n_classes).astype(float)
        # raw_prediction with entries -exp(10), but +exp(10) on the diagonal
        # this is close enough to np.inf which would produce nan
        raw_prediction = np.full(
            shape=(loss.n_classes, loss.n_classes),
            fill_value=-np.exp(10),
            dtype=float,
        )
        raw_prediction.flat[:: loss.n_classes + 1] = np.exp(10)

    if sample_weight == "range":
        sample_weight = np.linspace(1, y_true.shape[0], num=y_true.shape[0])

    loss_value = loss.loss(
        y_true=y_true,
        raw_prediction=raw_prediction,
        sample_weight=sample_weight,
    )
    constant_term = loss.constant_to_optimal_zero(
        y_true=y_true, sample_weight=sample_weight
    )
    # Comparing loss_value + constant_term to zero would result in large
    # round-off errors.
    assert_allclose(loss_value, -constant_term, atol=1e-14, rtol=1e-15)

@pytest.mark.parametrize("loss", LOSS_INSTANCES, ids=loss_instance_name)
|
782 |
+
@pytest.mark.parametrize("sample_weight", [None, "range"])
|
783 |
+
def test_gradients_hessians_numerically(loss, sample_weight, global_random_seed):
|
784 |
+
"""Test gradients and hessians with numerical derivatives.
|
785 |
+
|
786 |
+
Gradient should equal the numerical derivatives of the loss function.
|
787 |
+
Hessians should equal the numerical derivatives of gradients.
|
788 |
+
"""
|
789 |
+
n_samples = 20
|
790 |
+
y_true, raw_prediction = random_y_true_raw_prediction(
|
791 |
+
loss=loss,
|
792 |
+
n_samples=n_samples,
|
793 |
+
y_bound=(-100, 100),
|
794 |
+
raw_bound=(-5, 5),
|
795 |
+
seed=global_random_seed,
|
796 |
+
)
|
797 |
+
|
798 |
+
if sample_weight == "range":
|
799 |
+
sample_weight = np.linspace(1, y_true.shape[0], num=y_true.shape[0])
|
800 |
+
|
801 |
+
g, h = loss.gradient_hessian(
|
802 |
+
y_true=y_true,
|
803 |
+
raw_prediction=raw_prediction,
|
804 |
+
sample_weight=sample_weight,
|
805 |
+
)
|
806 |
+
|
807 |
+
assert g.shape == raw_prediction.shape
|
808 |
+
assert h.shape == raw_prediction.shape
|
809 |
+
|
810 |
+
if not loss.is_multiclass:
|
811 |
+
|
812 |
+
def loss_func(x):
|
813 |
+
return loss.loss(
|
814 |
+
y_true=y_true,
|
815 |
+
raw_prediction=x,
|
816 |
+
sample_weight=sample_weight,
|
817 |
+
)
|
818 |
+
|
819 |
+
g_numeric = numerical_derivative(loss_func, raw_prediction, eps=1e-6)
|
820 |
+
assert_allclose(g, g_numeric, rtol=5e-6, atol=1e-10)
|
821 |
+
|
822 |
+
def grad_func(x):
|
823 |
+
return loss.gradient(
|
824 |
+
y_true=y_true,
|
825 |
+
raw_prediction=x,
|
826 |
+
sample_weight=sample_weight,
|
827 |
+
)
|
828 |
+
|
829 |
+
h_numeric = numerical_derivative(grad_func, raw_prediction, eps=1e-6)
|
830 |
+
if loss.approx_hessian:
|
831 |
+
# TODO: What could we test if loss.approx_hessian?
|
832 |
+
pass
|
833 |
+
else:
|
834 |
+
assert_allclose(h, h_numeric, rtol=5e-6, atol=1e-10)
|
835 |
+
else:
|
836 |
+
# For multiclass loss, we should only change the predictions of the
|
837 |
+
# class for which the derivative is taken for, e.g. offset[:, k] = eps
|
838 |
+
# for class k.
|
839 |
+
# As a softmax is computed, offsetting the whole array by a constant
|
840 |
+
# would have no effect on the probabilities, and thus on the loss.
|
841 |
+
for k in range(loss.n_classes):
|
842 |
+
|
843 |
+
def loss_func(x):
|
844 |
+
raw = raw_prediction.copy()
|
845 |
+
raw[:, k] = x
|
846 |
+
return loss.loss(
|
847 |
+
y_true=y_true,
|
848 |
+
raw_prediction=raw,
|
849 |
+
sample_weight=sample_weight,
|
850 |
+
)
|
851 |
+
|
852 |
+
g_numeric = numerical_derivative(loss_func, raw_prediction[:, k], eps=1e-5)
|
853 |
+
assert_allclose(g[:, k], g_numeric, rtol=5e-6, atol=1e-10)
|
854 |
+
|
855 |
+
def grad_func(x):
|
856 |
+
raw = raw_prediction.copy()
|
857 |
+
raw[:, k] = x
|
858 |
+
return loss.gradient(
|
859 |
+
y_true=y_true,
|
860 |
+
raw_prediction=raw,
|
861 |
+
sample_weight=sample_weight,
|
862 |
+
)[:, k]
|
863 |
+
|
864 |
+
h_numeric = numerical_derivative(grad_func, raw_prediction[:, k], eps=1e-6)
|
865 |
+
if loss.approx_hessian:
|
866 |
+
# TODO: What could we test if loss.approx_hessian?
|
867 |
+
pass
|
868 |
+
else:
|
869 |
+
assert_allclose(h[:, k], h_numeric, rtol=5e-6, atol=1e-10)
|
870 |
+
|
871 |
+
|
872 |
+
@pytest.mark.parametrize(
|
873 |
+
"loss, x0, y_true",
|
874 |
+
[
|
875 |
+
("squared_error", -2.0, 42),
|
876 |
+
("squared_error", 117.0, 1.05),
|
877 |
+
("squared_error", 0.0, 0.0),
|
878 |
+
# The argmin of binomial_loss for y_true=0 and y_true=1 is resp.
|
879 |
+
# -inf and +inf due to logit, cf. "complete separation". Therefore, we
|
880 |
+
# use 0 < y_true < 1.
|
881 |
+
("binomial_loss", 0.3, 0.1),
|
882 |
+
("binomial_loss", -12, 0.2),
|
883 |
+
("binomial_loss", 30, 0.9),
|
884 |
+
("poisson_loss", 12.0, 1.0),
|
885 |
+
("poisson_loss", 0.0, 2.0),
|
886 |
+
("poisson_loss", -22.0, 10.0),
|
887 |
+
],
|
888 |
+
)
|
889 |
+
@skip_if_32bit
|
890 |
+
def test_derivatives(loss, x0, y_true):
|
891 |
+
"""Test that gradients are zero at the minimum of the loss.
|
892 |
+
|
893 |
+
We check this on a single value/sample using Halley's method with the
|
894 |
+
first and second order derivatives computed by the Loss instance.
|
895 |
+
Note that methods of Loss instances operate on arrays while the newton
|
896 |
+
root finder expects a scalar or a one-element array for this purpose.
|
897 |
+
"""
|
898 |
+
loss = _LOSSES[loss](sample_weight=None)
|
899 |
+
y_true = np.array([y_true], dtype=np.float64)
|
900 |
+
x0 = np.array([x0], dtype=np.float64)
|
901 |
+
|
902 |
+
def func(x: np.ndarray) -> np.ndarray:
|
903 |
+
"""Compute loss plus constant term.
|
904 |
+
|
905 |
+
The constant term is such that the minimum function value is zero,
|
906 |
+
which is required by the Newton method.
|
907 |
+
"""
|
908 |
+
return loss.loss(
|
909 |
+
y_true=y_true, raw_prediction=x
|
910 |
+
) + loss.constant_to_optimal_zero(y_true=y_true)
|
911 |
+
|
912 |
+
def fprime(x: np.ndarray) -> np.ndarray:
|
913 |
+
return loss.gradient(y_true=y_true, raw_prediction=x)
|
914 |
+
|
915 |
+
def fprime2(x: np.ndarray) -> np.ndarray:
|
916 |
+
return loss.gradient_hessian(y_true=y_true, raw_prediction=x)[1]
|
917 |
+
|
918 |
+
optimum = newton(
|
919 |
+
func,
|
920 |
+
x0=x0,
|
921 |
+
fprime=fprime,
|
922 |
+
fprime2=fprime2,
|
923 |
+
maxiter=100,
|
924 |
+
tol=5e-8,
|
925 |
+
)
|
926 |
+
|
927 |
+
# Need to ravel arrays because assert_allclose requires matching
|
928 |
+
# dimensions.
|
929 |
+
y_true = y_true.ravel()
|
930 |
+
optimum = optimum.ravel()
|
931 |
+
assert_allclose(loss.link.inverse(optimum), y_true)
|
932 |
+
assert_allclose(func(optimum), 0, atol=1e-14)
|
933 |
+
assert_allclose(loss.gradient(y_true=y_true, raw_prediction=optimum), 0, atol=5e-7)
|
934 |
+
|
935 |
+
|
936 |
+
@pytest.mark.parametrize("loss", LOSS_INSTANCES, ids=loss_instance_name)
|
937 |
+
@pytest.mark.parametrize("sample_weight", [None, "range"])
|
938 |
+
def test_loss_intercept_only(loss, sample_weight):
|
939 |
+
"""Test that fit_intercept_only returns the argmin of the loss.
|
940 |
+
|
941 |
+
Also test that the gradient is zero at the minimum.
|
942 |
+
"""
|
943 |
+
n_samples = 50
|
944 |
+
if not loss.is_multiclass:
|
945 |
+
y_true = loss.link.inverse(np.linspace(-4, 4, num=n_samples))
|
946 |
+
else:
|
947 |
+
y_true = np.arange(n_samples).astype(np.float64) % loss.n_classes
|
948 |
+
y_true[::5] = 0 # exceedance of class 0
|
949 |
+
|
950 |
+
if sample_weight == "range":
|
951 |
+
sample_weight = np.linspace(0.1, 2, num=n_samples)
|
952 |
+
|
953 |
+
a = loss.fit_intercept_only(y_true=y_true, sample_weight=sample_weight)
|
954 |
+
|
955 |
+
# find minimum by optimization
|
956 |
+
def fun(x):
|
957 |
+
if not loss.is_multiclass:
|
958 |
+
raw_prediction = np.full(shape=(n_samples), fill_value=x)
|
959 |
+
else:
|
960 |
+
raw_prediction = np.ascontiguousarray(
|
961 |
+
np.broadcast_to(x, shape=(n_samples, loss.n_classes))
|
962 |
+
)
|
963 |
+
return loss(
|
964 |
+
y_true=y_true,
|
965 |
+
raw_prediction=raw_prediction,
|
966 |
+
sample_weight=sample_weight,
|
967 |
+
)
|
968 |
+
|
969 |
+
if not loss.is_multiclass:
|
970 |
+
opt = minimize_scalar(fun, tol=1e-7, options={"maxiter": 100})
|
971 |
+
grad = loss.gradient(
|
972 |
+
y_true=y_true,
|
973 |
+
raw_prediction=np.full_like(y_true, a),
|
974 |
+
sample_weight=sample_weight,
|
975 |
+
)
|
976 |
+
assert a.shape == tuple() # scalar
|
977 |
+
assert a.dtype == y_true.dtype
|
978 |
+
assert_all_finite(a)
|
979 |
+
a == approx(opt.x, rel=1e-7)
|
980 |
+
grad.sum() == approx(0, abs=1e-12)
|
981 |
+
else:
|
982 |
+
# The constraint corresponds to sum(raw_prediction) = 0. Without it, we would
|
983 |
+
# need to apply loss.symmetrize_raw_prediction to opt.x before comparing.
|
984 |
+
opt = minimize(
|
985 |
+
fun,
|
986 |
+
np.zeros((loss.n_classes)),
|
987 |
+
tol=1e-13,
|
988 |
+
options={"maxiter": 100},
|
989 |
+
method="SLSQP",
|
990 |
+
constraints=LinearConstraint(np.ones((1, loss.n_classes)), 0, 0),
|
991 |
+
)
|
992 |
+
grad = loss.gradient(
|
993 |
+
y_true=y_true,
|
994 |
+
raw_prediction=np.tile(a, (n_samples, 1)),
|
995 |
+
sample_weight=sample_weight,
|
996 |
+
)
|
997 |
+
assert a.dtype == y_true.dtype
|
998 |
+
assert_all_finite(a)
|
999 |
+
assert_allclose(a, opt.x, rtol=5e-6, atol=1e-12)
|
1000 |
+
assert_allclose(grad.sum(axis=0), 0, atol=1e-12)
|
1001 |
+
|
1002 |
+
|
1003 |
+
@pytest.mark.parametrize(
|
1004 |
+
"loss, func, random_dist",
|
1005 |
+
[
|
1006 |
+
(HalfSquaredError(), np.mean, "normal"),
|
1007 |
+
(AbsoluteError(), np.median, "normal"),
|
1008 |
+
(PinballLoss(quantile=0.25), lambda x: np.percentile(x, q=25), "normal"),
|
1009 |
+
(HalfPoissonLoss(), np.mean, "poisson"),
|
1010 |
+
(HalfGammaLoss(), np.mean, "exponential"),
|
1011 |
+
(HalfTweedieLoss(), np.mean, "exponential"),
|
1012 |
+
(HalfBinomialLoss(), np.mean, "binomial"),
|
1013 |
+
],
|
1014 |
+
)
|
1015 |
+
def test_specific_fit_intercept_only(loss, func, random_dist, global_random_seed):
|
1016 |
+
"""Test that fit_intercept_only returns the correct functional.
|
1017 |
+
|
1018 |
+
We test the functional for specific, meaningful distributions, e.g.
|
1019 |
+
squared error estimates the expectation of a probability distribution.
|
1020 |
+
"""
|
1021 |
+
rng = np.random.RandomState(global_random_seed)
|
1022 |
+
if random_dist == "binomial":
|
1023 |
+
y_train = rng.binomial(1, 0.5, size=100)
|
1024 |
+
else:
|
1025 |
+
y_train = getattr(rng, random_dist)(size=100)
|
1026 |
+
baseline_prediction = loss.fit_intercept_only(y_true=y_train)
|
1027 |
+
# Make sure baseline prediction is the expected functional=func, e.g. mean
|
1028 |
+
# or median.
|
1029 |
+
assert_all_finite(baseline_prediction)
|
1030 |
+
assert baseline_prediction == approx(loss.link.link(func(y_train)))
|
1031 |
+
assert loss.link.inverse(baseline_prediction) == approx(func(y_train))
|
1032 |
+
if isinstance(loss, IdentityLink):
|
1033 |
+
assert_allclose(loss.link.inverse(baseline_prediction), baseline_prediction)
|
1034 |
+
|
1035 |
+
# Test baseline at boundary
|
1036 |
+
if loss.interval_y_true.low_inclusive:
|
1037 |
+
y_train.fill(loss.interval_y_true.low)
|
1038 |
+
baseline_prediction = loss.fit_intercept_only(y_true=y_train)
|
1039 |
+
assert_all_finite(baseline_prediction)
|
1040 |
+
if loss.interval_y_true.high_inclusive:
|
1041 |
+
y_train.fill(loss.interval_y_true.high)
|
1042 |
+
baseline_prediction = loss.fit_intercept_only(y_true=y_train)
|
1043 |
+
assert_all_finite(baseline_prediction)
|
1044 |
+
|
1045 |
+
|
1046 |
+
def test_multinomial_loss_fit_intercept_only():
|
1047 |
+
"""Test that fit_intercept_only returns the mean functional for CCE."""
|
1048 |
+
rng = np.random.RandomState(0)
|
1049 |
+
n_classes = 4
|
1050 |
+
loss = HalfMultinomialLoss(n_classes=n_classes)
|
1051 |
+
# Same logic as test_specific_fit_intercept_only. Here inverse link
|
1052 |
+
# function = softmax and link function = log - symmetry term.
|
1053 |
+
y_train = rng.randint(0, n_classes + 1, size=100).astype(np.float64)
|
1054 |
+
baseline_prediction = loss.fit_intercept_only(y_true=y_train)
|
1055 |
+
assert baseline_prediction.shape == (n_classes,)
|
1056 |
+
p = np.zeros(n_classes, dtype=y_train.dtype)
|
1057 |
+
for k in range(n_classes):
|
1058 |
+
p[k] = (y_train == k).mean()
|
1059 |
+
assert_allclose(baseline_prediction, np.log(p) - np.mean(np.log(p)))
|
1060 |
+
assert_allclose(baseline_prediction[None, :], loss.link.link(p[None, :]))
|
1061 |
+
|
1062 |
+
for y_train in (np.zeros(shape=10), np.ones(shape=10)):
|
1063 |
+
y_train = y_train.astype(np.float64)
|
1064 |
+
baseline_prediction = loss.fit_intercept_only(y_true=y_train)
|
1065 |
+
assert baseline_prediction.dtype == y_train.dtype
|
1066 |
+
assert_all_finite(baseline_prediction)
|
1067 |
+
|
1068 |
+
|
1069 |
+
def test_binomial_and_multinomial_loss(global_random_seed):
|
1070 |
+
"""Test that multinomial loss with n_classes = 2 is the same as binomial loss."""
|
1071 |
+
rng = np.random.RandomState(global_random_seed)
|
1072 |
+
n_samples = 20
|
1073 |
+
binom = HalfBinomialLoss()
|
1074 |
+
multinom = HalfMultinomialLoss(n_classes=2)
|
1075 |
+
y_train = rng.randint(0, 2, size=n_samples).astype(np.float64)
|
1076 |
+
raw_prediction = rng.normal(size=n_samples)
|
1077 |
+
raw_multinom = np.empty((n_samples, 2))
|
1078 |
+
raw_multinom[:, 0] = -0.5 * raw_prediction
|
1079 |
+
raw_multinom[:, 1] = 0.5 * raw_prediction
|
1080 |
+
assert_allclose(
|
1081 |
+
binom.loss(y_true=y_train, raw_prediction=raw_prediction),
|
1082 |
+
multinom.loss(y_true=y_train, raw_prediction=raw_multinom),
|
1083 |
+
)
|
1084 |
+
|
1085 |
+
|
1086 |
+
@pytest.mark.parametrize("y_true", (np.array([0.0, 0, 0]), np.array([1.0, 1, 1])))
|
1087 |
+
@pytest.mark.parametrize("y_pred", (np.array([-5.0, -5, -5]), np.array([3.0, 3, 3])))
|
1088 |
+
def test_binomial_vs_alternative_formulation(y_true, y_pred, global_dtype):
|
1089 |
+
"""Test that both formulations of the binomial deviance agree.
|
1090 |
+
|
1091 |
+
Often, the binomial deviance or log loss is written in terms of a variable
|
1092 |
+
z in {-1, +1}, but we use y in {0, 1}, hence z = 2 * y - 1.
|
1093 |
+
ESL II Eq. (10.18):
|
1094 |
+
|
1095 |
+
-loglike(z, f) = log(1 + exp(-2 * z * f))
|
1096 |
+
|
1097 |
+
Note:
|
1098 |
+
- ESL 2*f = raw_prediction, hence the factor 2 of ESL disappears.
|
1099 |
+
- Deviance = -2*loglike + .., but HalfBinomialLoss is half of the
|
1100 |
+
deviance, hence the factor of 2 cancels in the comparison.
|
1101 |
+
"""
|
1102 |
+
|
1103 |
+
def alt_loss(y, raw_pred):
|
1104 |
+
z = 2 * y - 1
|
1105 |
+
return np.mean(np.log(1 + np.exp(-z * raw_pred)))
|
1106 |
+
|
1107 |
+
def alt_gradient(y, raw_pred):
|
1108 |
+
# alternative gradient formula according to ESL
|
1109 |
+
z = 2 * y - 1
|
1110 |
+
return -z / (1 + np.exp(z * raw_pred))
|
1111 |
+
|
1112 |
+
bin_loss = HalfBinomialLoss()
|
1113 |
+
|
1114 |
+
y_true = y_true.astype(global_dtype)
|
1115 |
+
y_pred = y_pred.astype(global_dtype)
|
1116 |
+
datum = (y_true, y_pred)
|
1117 |
+
|
1118 |
+
assert bin_loss(*datum) == approx(alt_loss(*datum))
|
1119 |
+
assert_allclose(bin_loss.gradient(*datum), alt_gradient(*datum))
|
1120 |
+
|
1121 |
+
|
1122 |
+
@pytest.mark.parametrize("loss", LOSS_INSTANCES, ids=loss_instance_name)
|
1123 |
+
def test_predict_proba(loss, global_random_seed):
|
1124 |
+
"""Test that predict_proba and gradient_proba work as expected."""
|
1125 |
+
n_samples = 20
|
1126 |
+
y_true, raw_prediction = random_y_true_raw_prediction(
|
1127 |
+
loss=loss,
|
1128 |
+
n_samples=n_samples,
|
1129 |
+
y_bound=(-100, 100),
|
1130 |
+
raw_bound=(-5, 5),
|
1131 |
+
seed=global_random_seed,
|
1132 |
+
)
|
1133 |
+
|
1134 |
+
if hasattr(loss, "predict_proba"):
|
1135 |
+
proba = loss.predict_proba(raw_prediction)
|
1136 |
+
assert proba.shape == (n_samples, loss.n_classes)
|
1137 |
+
assert np.sum(proba, axis=1) == approx(1, rel=1e-11)
|
1138 |
+
|
1139 |
+
if hasattr(loss, "gradient_proba"):
|
1140 |
+
for grad, proba in (
|
1141 |
+
(None, None),
|
1142 |
+
(None, np.empty_like(raw_prediction)),
|
1143 |
+
(np.empty_like(raw_prediction), None),
|
1144 |
+
(np.empty_like(raw_prediction), np.empty_like(raw_prediction)),
|
1145 |
+
):
|
1146 |
+
grad, proba = loss.gradient_proba(
|
1147 |
+
y_true=y_true,
|
1148 |
+
raw_prediction=raw_prediction,
|
1149 |
+
sample_weight=None,
|
1150 |
+
gradient_out=grad,
|
1151 |
+
proba_out=proba,
|
1152 |
+
)
|
1153 |
+
assert proba.shape == (n_samples, loss.n_classes)
|
1154 |
+
assert np.sum(proba, axis=1) == approx(1, rel=1e-11)
|
1155 |
+
assert_allclose(
|
1156 |
+
grad,
|
1157 |
+
loss.gradient(
|
1158 |
+
y_true=y_true,
|
1159 |
+
raw_prediction=raw_prediction,
|
1160 |
+
sample_weight=None,
|
1161 |
+
gradient_out=None,
|
1162 |
+
),
|
1163 |
+
)
|
1164 |
+
|
1165 |
+
|
1166 |
+
@pytest.mark.parametrize("loss", ALL_LOSSES)
|
1167 |
+
@pytest.mark.parametrize("sample_weight", [None, "range"])
|
1168 |
+
@pytest.mark.parametrize("dtype", (np.float32, np.float64))
|
1169 |
+
@pytest.mark.parametrize("order", ("C", "F"))
|
1170 |
+
def test_init_gradient_and_hessians(loss, sample_weight, dtype, order):
|
1171 |
+
"""Test that init_gradient_and_hessian works as expected.
|
1172 |
+
|
1173 |
+
passing sample_weight to a loss correctly influences the constant_hessian
|
1174 |
+
attribute, and consequently the shape of the hessian array.
|
1175 |
+
"""
|
1176 |
+
n_samples = 5
|
1177 |
+
if sample_weight == "range":
|
1178 |
+
sample_weight = np.ones(n_samples)
|
1179 |
+
loss = loss(sample_weight=sample_weight)
|
1180 |
+
gradient, hessian = loss.init_gradient_and_hessian(
|
1181 |
+
n_samples=n_samples,
|
1182 |
+
dtype=dtype,
|
1183 |
+
order=order,
|
1184 |
+
)
|
1185 |
+
if loss.constant_hessian:
|
1186 |
+
assert gradient.shape == (n_samples,)
|
1187 |
+
assert hessian.shape == (1,)
|
1188 |
+
elif loss.is_multiclass:
|
1189 |
+
assert gradient.shape == (n_samples, loss.n_classes)
|
1190 |
+
assert hessian.shape == (n_samples, loss.n_classes)
|
1191 |
+
else:
|
1192 |
+
assert hessian.shape == (n_samples,)
|
1193 |
+
assert hessian.shape == (n_samples,)
|
1194 |
+
|
1195 |
+
assert gradient.dtype == dtype
|
1196 |
+
assert hessian.dtype == dtype
|
1197 |
+
|
1198 |
+
if order == "C":
|
1199 |
+
assert gradient.flags.c_contiguous
|
1200 |
+
assert hessian.flags.c_contiguous
|
1201 |
+
else:
|
1202 |
+
assert gradient.flags.f_contiguous
|
1203 |
+
assert hessian.flags.f_contiguous
|
1204 |
+
|
1205 |
+
|
1206 |
+
@pytest.mark.parametrize("loss", ALL_LOSSES)
|
1207 |
+
@pytest.mark.parametrize(
|
1208 |
+
"params, err_msg",
|
1209 |
+
[
|
1210 |
+
(
|
1211 |
+
{"dtype": np.int64},
|
1212 |
+
f"Valid options for 'dtype' are .* Got dtype={np.int64} instead.",
|
1213 |
+
),
|
1214 |
+
],
|
1215 |
+
)
|
1216 |
+
def test_init_gradient_and_hessian_raises(loss, params, err_msg):
|
1217 |
+
"""Test that init_gradient_and_hessian raises errors for invalid input."""
|
1218 |
+
loss = loss()
|
1219 |
+
with pytest.raises((ValueError, TypeError), match=err_msg):
|
1220 |
+
gradient, hessian = loss.init_gradient_and_hessian(n_samples=5, **params)
|
1221 |
+
|
1222 |
+
|
1223 |
+
@pytest.mark.parametrize(
|
1224 |
+
"loss, params, err_type, err_msg",
|
1225 |
+
[
|
1226 |
+
(
|
1227 |
+
PinballLoss,
|
1228 |
+
{"quantile": None},
|
1229 |
+
TypeError,
|
1230 |
+
"quantile must be an instance of float, not NoneType.",
|
1231 |
+
),
|
1232 |
+
(
|
1233 |
+
PinballLoss,
|
1234 |
+
{"quantile": 0},
|
1235 |
+
ValueError,
|
1236 |
+
"quantile == 0, must be > 0.",
|
1237 |
+
),
|
1238 |
+
(PinballLoss, {"quantile": 1.1}, ValueError, "quantile == 1.1, must be < 1."),
|
1239 |
+
(
|
1240 |
+
HuberLoss,
|
1241 |
+
{"quantile": None},
|
1242 |
+
TypeError,
|
1243 |
+
"quantile must be an instance of float, not NoneType.",
|
1244 |
+
),
|
1245 |
+
(
|
1246 |
+
HuberLoss,
|
1247 |
+
{"quantile": 0},
|
1248 |
+
ValueError,
|
1249 |
+
"quantile == 0, must be > 0.",
|
1250 |
+
),
|
1251 |
+
(HuberLoss, {"quantile": 1.1}, ValueError, "quantile == 1.1, must be < 1."),
|
1252 |
+
],
|
1253 |
+
)
|
1254 |
+
def test_loss_init_parameter_validation(loss, params, err_type, err_msg):
|
1255 |
+
"""Test that loss raises errors for invalid input."""
|
1256 |
+
with pytest.raises(err_type, match=err_msg):
|
1257 |
+
loss(**params)
|
1258 |
+
|
1259 |
+
|
1260 |
+
@pytest.mark.parametrize("loss", LOSS_INSTANCES, ids=loss_instance_name)
|
1261 |
+
def test_loss_pickle(loss):
|
1262 |
+
"""Test that losses can be pickled."""
|
1263 |
+
n_samples = 20
|
1264 |
+
y_true, raw_prediction = random_y_true_raw_prediction(
|
1265 |
+
loss=loss,
|
1266 |
+
n_samples=n_samples,
|
1267 |
+
y_bound=(-100, 100),
|
1268 |
+
raw_bound=(-5, 5),
|
1269 |
+
seed=42,
|
1270 |
+
)
|
1271 |
+
pickled_loss = pickle.dumps(loss)
|
1272 |
+
unpickled_loss = pickle.loads(pickled_loss)
|
1273 |
+
assert loss(y_true=y_true, raw_prediction=raw_prediction) == approx(
|
1274 |
+
unpickled_loss(y_true=y_true, raw_prediction=raw_prediction)
|
1275 |
+
)
|
1276 |
+
|
1277 |
+
|
1278 |
+
@pytest.mark.parametrize("p", [-1.5, 0, 1, 1.5, 2, 3])
|
1279 |
+
def test_tweedie_log_identity_consistency(p):
|
1280 |
+
"""Test for identical losses when only the link function is different."""
|
1281 |
+
half_tweedie_log = HalfTweedieLoss(power=p)
|
1282 |
+
half_tweedie_identity = HalfTweedieLossIdentity(power=p)
|
1283 |
+
n_samples = 10
|
1284 |
+
y_true, raw_prediction = random_y_true_raw_prediction(
|
1285 |
+
loss=half_tweedie_log, n_samples=n_samples, seed=42
|
1286 |
+
)
|
1287 |
+
y_pred = half_tweedie_log.link.inverse(raw_prediction) # exp(raw_prediction)
|
1288 |
+
|
1289 |
+
# Let's compare the loss values, up to some constant term that is dropped
|
1290 |
+
# in HalfTweedieLoss but not in HalfTweedieLossIdentity.
|
1291 |
+
loss_log = half_tweedie_log.loss(
|
1292 |
+
y_true=y_true, raw_prediction=raw_prediction
|
1293 |
+
) + half_tweedie_log.constant_to_optimal_zero(y_true)
|
1294 |
+
loss_identity = half_tweedie_identity.loss(
|
1295 |
+
y_true=y_true, raw_prediction=y_pred
|
1296 |
+
) + half_tweedie_identity.constant_to_optimal_zero(y_true)
|
1297 |
+
# Note that HalfTweedieLoss ignores different constant terms than
|
1298 |
+
# HalfTweedieLossIdentity. Constant terms means terms not depending on
|
1299 |
+
# raw_prediction. By adding these terms, `constant_to_optimal_zero`, both losses
|
1300 |
+
# give the same values.
|
1301 |
+
assert_allclose(loss_log, loss_identity)
|
1302 |
+
|
1303 |
+
# For gradients and hessians, the constant terms do not matter. We have, however,
|
1304 |
+
# to account for the chain rule, i.e. with x=raw_prediction
|
1305 |
+
# gradient_log(x) = d/dx loss_log(x)
|
1306 |
+
# = d/dx loss_identity(exp(x))
|
1307 |
+
# = exp(x) * gradient_identity(exp(x))
|
1308 |
+
# Similarly,
|
1309 |
+
# hessian_log(x) = exp(x) * gradient_identity(exp(x))
|
1310 |
+
# + exp(x)**2 * hessian_identity(x)
|
1311 |
+
gradient_log, hessian_log = half_tweedie_log.gradient_hessian(
|
1312 |
+
y_true=y_true, raw_prediction=raw_prediction
|
1313 |
+
)
|
1314 |
+
gradient_identity, hessian_identity = half_tweedie_identity.gradient_hessian(
|
1315 |
+
y_true=y_true, raw_prediction=y_pred
|
1316 |
+
)
|
1317 |
+
assert_allclose(gradient_log, y_pred * gradient_identity)
|
1318 |
+
assert_allclose(
|
1319 |
+
hessian_log, y_pred * gradient_identity + y_pred**2 * hessian_identity
|
1320 |
+
)
|
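Note: the tests above repeatedly compare analytic gradients and hessians against a `numerical_derivative` helper imported by this test module. The following is a minimal sketch of that idea in plain numpy, not sklearn's actual helper (which may use a higher-order difference stencil); the half squared error example is our own illustration.

import numpy as np

def numerical_derivative(func, x, eps):
    # Central finite difference (f(x + eps) - f(x - eps)) / (2 * eps),
    # applied elementwise to a vectorized pointwise loss.
    return (func(x + eps) - func(x - eps)) / (2 * eps)

# Half squared error 0.5 * (raw - y)**2 has analytic gradient raw - y.
y_true = np.array([1.0, 2.0, 3.0])
raw = np.array([0.5, 2.5, 2.0])
grad_numeric = numerical_derivative(lambda x: 0.5 * (x - y_true) ** 2, raw, eps=1e-6)
np.testing.assert_allclose(grad_numeric, raw - y_true, rtol=1e-7)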
env-llmeval/lib/python3.10/site-packages/sklearn/compose/__init__.py
ADDED
@@ -0,0 +1,20 @@
+"""Meta-estimators for building composite models with transformers
+
+In addition to its current contents, this module will eventually be home to
+refurbished versions of Pipeline and FeatureUnion.
+
+"""
+
+from ._column_transformer import (
+    ColumnTransformer,
+    make_column_selector,
+    make_column_transformer,
+)
+from ._target import TransformedTargetRegressor
+
+__all__ = [
+    "ColumnTransformer",
+    "make_column_transformer",
+    "TransformedTargetRegressor",
+    "make_column_selector",
+]
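For context, a short sketch of how the public API re-exported by this `__init__.py` is typically used (toy data of our own; assumes pandas is installed):

import pandas as pd
from sklearn.compose import make_column_selector, make_column_transformer
from sklearn.preprocessing import OneHotEncoder, StandardScaler

# Toy frame with one numeric and one categorical column.
X = pd.DataFrame({"temp": [21.0, 25.0, 19.0], "city": ["London", "Paris", "London"]})
ct = make_column_transformer(
    (StandardScaler(), make_column_selector(dtype_include="number")),
    (OneHotEncoder(sparse_output=False), make_column_selector(dtype_include=object)),
)
print(ct.fit_transform(X))  # scaled "temp" column followed by one-hot "city" columns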
env-llmeval/lib/python3.10/site-packages/sklearn/compose/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (615 Bytes).

env-llmeval/lib/python3.10/site-packages/sklearn/compose/__pycache__/_column_transformer.cpython-310.pyc
ADDED
Binary file (49.4 kB).

env-llmeval/lib/python3.10/site-packages/sklearn/compose/__pycache__/_target.cpython-310.pyc
ADDED
Binary file (9.68 kB).
env-llmeval/lib/python3.10/site-packages/sklearn/compose/_column_transformer.py
ADDED
@@ -0,0 +1,1463 @@
1 |
+
"""
|
2 |
+
The :mod:`sklearn.compose._column_transformer` module implements utilities
|
3 |
+
to work with heterogeneous data and to apply different transformers to
|
4 |
+
different columns.
|
5 |
+
"""
|
6 |
+
|
7 |
+
# Author: Andreas Mueller
|
8 |
+
# Joris Van den Bossche
|
9 |
+
# License: BSD
|
10 |
+
import warnings
|
11 |
+
from collections import Counter
|
12 |
+
from itertools import chain
|
13 |
+
from numbers import Integral, Real
|
14 |
+
|
15 |
+
import numpy as np
|
16 |
+
from scipy import sparse
|
17 |
+
|
18 |
+
from ..base import TransformerMixin, _fit_context, clone
|
19 |
+
from ..pipeline import _fit_transform_one, _name_estimators, _transform_one
|
20 |
+
from ..preprocessing import FunctionTransformer
|
21 |
+
from ..utils import Bunch, _get_column_indices, _safe_indexing
|
22 |
+
from ..utils._estimator_html_repr import _VisualBlock
|
23 |
+
from ..utils._metadata_requests import METHODS
|
24 |
+
from ..utils._param_validation import HasMethods, Hidden, Interval, StrOptions
|
25 |
+
from ..utils._set_output import (
|
26 |
+
_get_container_adapter,
|
27 |
+
_get_output_config,
|
28 |
+
_safe_set_output,
|
29 |
+
)
|
30 |
+
from ..utils.metadata_routing import (
|
31 |
+
MetadataRouter,
|
32 |
+
MethodMapping,
|
33 |
+
_raise_for_params,
|
34 |
+
_routing_enabled,
|
35 |
+
process_routing,
|
36 |
+
)
|
37 |
+
from ..utils.metaestimators import _BaseComposition
|
38 |
+
from ..utils.parallel import Parallel, delayed
|
39 |
+
from ..utils.validation import (
|
40 |
+
_check_feature_names_in,
|
41 |
+
_get_feature_names,
|
42 |
+
_is_pandas_df,
|
43 |
+
_num_samples,
|
44 |
+
check_array,
|
45 |
+
check_is_fitted,
|
46 |
+
)
|
47 |
+
|
48 |
+
__all__ = ["ColumnTransformer", "make_column_transformer", "make_column_selector"]
|
49 |
+
|
50 |
+
|
51 |
+
_ERR_MSG_1DCOLUMN = (
|
52 |
+
"1D data passed to a transformer that expects 2D data. "
|
53 |
+
"Try to specify the column selection as a list of one "
|
54 |
+
"item instead of a scalar."
|
55 |
+
)
|
56 |
+
|
57 |
+
|
58 |
+
class ColumnTransformer(TransformerMixin, _BaseComposition):
|
59 |
+
"""Applies transformers to columns of an array or pandas DataFrame.
|
60 |
+
|
61 |
+
This estimator allows different columns or column subsets of the input
|
62 |
+
to be transformed separately and the features generated by each transformer
|
63 |
+
will be concatenated to form a single feature space.
|
64 |
+
This is useful for heterogeneous or columnar data, to combine several
|
65 |
+
feature extraction mechanisms or transformations into a single transformer.
|
66 |
+
|
67 |
+
Read more in the :ref:`User Guide <column_transformer>`.
|
68 |
+
|
69 |
+
.. versionadded:: 0.20
|
70 |
+
|
71 |
+
Parameters
|
72 |
+
----------
|
73 |
+
transformers : list of tuples
|
74 |
+
List of (name, transformer, columns) tuples specifying the
|
75 |
+
transformer objects to be applied to subsets of the data.
|
76 |
+
|
77 |
+
name : str
|
78 |
+
Like in Pipeline and FeatureUnion, this allows the transformer and
|
79 |
+
its parameters to be set using ``set_params`` and searched in grid
|
80 |
+
search.
|
81 |
+
transformer : {'drop', 'passthrough'} or estimator
|
82 |
+
Estimator must support :term:`fit` and :term:`transform`.
|
83 |
+
Special-cased strings 'drop' and 'passthrough' are accepted as
|
84 |
+
well, to indicate to drop the columns or to pass them through
|
85 |
+
untransformed, respectively.
|
86 |
+
columns : str, array-like of str, int, array-like of int, \
|
87 |
+
array-like of bool, slice or callable
|
88 |
+
Indexes the data on its second axis. Integers are interpreted as
|
89 |
+
positional columns, while strings can reference DataFrame columns
|
90 |
+
by name. A scalar string or int should be used where
|
91 |
+
``transformer`` expects X to be a 1d array-like (vector),
|
92 |
+
otherwise a 2d array will be passed to the transformer.
|
93 |
+
A callable is passed the input data `X` and can return any of the
|
94 |
+
above. To select multiple columns by name or dtype, you can use
|
95 |
+
:obj:`make_column_selector`.
|
96 |
+
|
97 |
+
remainder : {'drop', 'passthrough'} or estimator, default='drop'
|
98 |
+
By default, only the specified columns in `transformers` are
|
99 |
+
transformed and combined in the output, and the non-specified
|
100 |
+
columns are dropped. (default of ``'drop'``).
|
101 |
+
By specifying ``remainder='passthrough'``, all remaining columns that
|
102 |
+
were not specified in `transformers`, but present in the data passed
|
103 |
+
to `fit` will be automatically passed through. This subset of columns
|
104 |
+
is concatenated with the output of the transformers. For dataframes,
|
105 |
+
extra columns not seen during `fit` will be excluded from the output
|
106 |
+
of `transform`.
|
107 |
+
By setting ``remainder`` to be an estimator, the remaining
|
108 |
+
non-specified columns will use the ``remainder`` estimator. The
|
109 |
+
estimator must support :term:`fit` and :term:`transform`.
|
110 |
+
Note that using this feature requires that the DataFrame columns
|
111 |
+
input at :term:`fit` and :term:`transform` have identical order.
|
112 |
+
|
113 |
+
sparse_threshold : float, default=0.3
|
114 |
+
If the output of the different transformers contains sparse matrices,
|
115 |
+
these will be stacked as a sparse matrix if the overall density is
|
116 |
+
lower than this value. Use ``sparse_threshold=0`` to always return
|
117 |
+
dense. When the transformed output consists of all dense data, the
|
118 |
+
stacked result will be dense, and this keyword will be ignored.
|
119 |
+
|
120 |
+
n_jobs : int, default=None
|
121 |
+
Number of jobs to run in parallel.
|
122 |
+
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
|
123 |
+
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
|
124 |
+
for more details.
|
125 |
+
|
126 |
+
transformer_weights : dict, default=None
|
127 |
+
Multiplicative weights for features per transformer. The output of the
|
128 |
+
transformer is multiplied by these weights. Keys are transformer names,
|
129 |
+
values the weights.
|
130 |
+
|
131 |
+
verbose : bool, default=False
|
132 |
+
If True, the time elapsed while fitting each transformer will be
|
133 |
+
printed as it is completed.
|
134 |
+
|
135 |
+
verbose_feature_names_out : bool, default=True
|
136 |
+
If True, :meth:`ColumnTransformer.get_feature_names_out` will prefix
|
137 |
+
all feature names with the name of the transformer that generated that
|
138 |
+
feature.
|
139 |
+
If False, :meth:`ColumnTransformer.get_feature_names_out` will not
|
140 |
+
prefix any feature names and will error if feature names are not
|
141 |
+
unique.
|
142 |
+
|
143 |
+
.. versionadded:: 1.0
|
144 |
+
|
145 |
+
Attributes
|
146 |
+
----------
|
147 |
+
transformers_ : list
|
148 |
+
The collection of fitted transformers as tuples of (name,
|
149 |
+
fitted_transformer, column). `fitted_transformer` can be an estimator,
|
150 |
+
or `'drop'`; `'passthrough'` is replaced with an equivalent
|
151 |
+
:class:`~sklearn.preprocessing.FunctionTransformer`. In case there were
|
152 |
+
no columns selected, this will be the unfitted transformer. If there
|
153 |
+
are remaining columns, the final element is a tuple of the form:
|
154 |
+
('remainder', transformer, remaining_columns) corresponding to the
|
155 |
+
``remainder`` parameter. If there are remaining columns, then
|
156 |
+
``len(transformers_)==len(transformers)+1``, otherwise
|
157 |
+
``len(transformers_)==len(transformers)``.
|
158 |
+
|
159 |
+
named_transformers_ : :class:`~sklearn.utils.Bunch`
|
160 |
+
Read-only attribute to access any transformer by given name.
|
161 |
+
Keys are transformer names and values are the fitted transformer
|
162 |
+
objects.
|
163 |
+
|
164 |
+
sparse_output_ : bool
|
165 |
+
Boolean flag indicating whether the output of ``transform`` is a
|
166 |
+
sparse matrix or a dense numpy array, which depends on the output
|
167 |
+
of the individual transformers and the `sparse_threshold` keyword.
|
168 |
+
|
169 |
+
output_indices_ : dict
|
170 |
+
A dictionary from each transformer name to a slice, where the slice
|
171 |
+
corresponds to indices in the transformed output. This is useful to
|
172 |
+
inspect which transformer is responsible for which transformed
|
173 |
+
feature(s).
|
174 |
+
|
175 |
+
.. versionadded:: 1.0
|
176 |
+
|
177 |
+
n_features_in_ : int
|
178 |
+
Number of features seen during :term:`fit`. Only defined if the
|
179 |
+
underlying transformers expose such an attribute when fit.
|
180 |
+
|
181 |
+
.. versionadded:: 0.24
|
182 |
+
|
183 |
+
feature_names_in_ : ndarray of shape (`n_features_in_`,)
|
184 |
+
Names of features seen during :term:`fit`. Defined only when `X`
|
185 |
+
has feature names that are all strings.
|
186 |
+
|
187 |
+
.. versionadded:: 1.0
|
188 |
+
|
189 |
+
See Also
|
190 |
+
--------
|
191 |
+
make_column_transformer : Convenience function for
|
192 |
+
combining the outputs of multiple transformer objects applied to
|
193 |
+
column subsets of the original feature space.
|
194 |
+
make_column_selector : Convenience function for selecting
|
195 |
+
columns based on datatype or the columns name with a regex pattern.
|
196 |
+
|
197 |
+
Notes
|
198 |
+
-----
|
199 |
+
The order of the columns in the transformed feature matrix follows the
|
200 |
+
order of how the columns are specified in the `transformers` list.
|
201 |
+
Columns of the original feature matrix that are not specified are
|
202 |
+
dropped from the resulting transformed feature matrix, unless specified
|
203 |
+
in the `passthrough` keyword. Those columns specified with `passthrough`
|
204 |
+
are added at the right to the output of the transformers.
|
205 |
+
|
206 |
+
Examples
|
207 |
+
--------
|
208 |
+
>>> import numpy as np
|
209 |
+
>>> from sklearn.compose import ColumnTransformer
|
210 |
+
>>> from sklearn.preprocessing import Normalizer
|
211 |
+
>>> ct = ColumnTransformer(
|
212 |
+
... [("norm1", Normalizer(norm='l1'), [0, 1]),
|
213 |
+
... ("norm2", Normalizer(norm='l1'), slice(2, 4))])
|
214 |
+
>>> X = np.array([[0., 1., 2., 2.],
|
215 |
+
... [1., 1., 0., 1.]])
|
216 |
+
>>> # Normalizer scales each row of X to unit norm. A separate scaling
|
217 |
+
>>> # is applied for the two first and two last elements of each
|
218 |
+
>>> # row independently.
|
219 |
+
>>> ct.fit_transform(X)
|
220 |
+
array([[0. , 1. , 0.5, 0.5],
|
221 |
+
[0.5, 0.5, 0. , 1. ]])
|
222 |
+
|
223 |
+
:class:`ColumnTransformer` can be configured with a transformer that requires
|
224 |
+
a 1d array by setting the column to a string:
|
225 |
+
|
226 |
+
>>> from sklearn.feature_extraction.text import CountVectorizer
|
227 |
+
>>> from sklearn.preprocessing import MinMaxScaler
|
228 |
+
>>> import pandas as pd # doctest: +SKIP
|
229 |
+
>>> X = pd.DataFrame({
|
230 |
+
... "documents": ["First item", "second one here", "Is this the last?"],
|
231 |
+
... "width": [3, 4, 5],
|
232 |
+
... }) # doctest: +SKIP
|
233 |
+
>>> # "documents" is a string which configures ColumnTransformer to
|
234 |
+
>>> # pass the documents column as a 1d array to the CountVectorizer
|
235 |
+
>>> ct = ColumnTransformer(
|
236 |
+
... [("text_preprocess", CountVectorizer(), "documents"),
|
237 |
+
... ("num_preprocess", MinMaxScaler(), ["width"])])
|
238 |
+
>>> X_trans = ct.fit_transform(X) # doctest: +SKIP
|
239 |
+
|
240 |
+
For a more detailed example of usage, see
|
241 |
+
:ref:`sphx_glr_auto_examples_compose_plot_column_transformer_mixed_types.py`.
|
242 |
+
"""
|
243 |
+
|
244 |
+
_required_parameters = ["transformers"]
|
245 |
+
|
246 |
+
_parameter_constraints: dict = {
|
247 |
+
"transformers": [list, Hidden(tuple)],
|
248 |
+
"remainder": [
|
249 |
+
StrOptions({"drop", "passthrough"}),
|
250 |
+
HasMethods(["fit", "transform"]),
|
251 |
+
HasMethods(["fit_transform", "transform"]),
|
252 |
+
],
|
253 |
+
"sparse_threshold": [Interval(Real, 0, 1, closed="both")],
|
254 |
+
"n_jobs": [Integral, None],
|
255 |
+
"transformer_weights": [dict, None],
|
256 |
+
"verbose": ["verbose"],
|
257 |
+
"verbose_feature_names_out": ["boolean"],
|
258 |
+
}
|
259 |
+
|
260 |
+
def __init__(
|
261 |
+
self,
|
262 |
+
transformers,
|
263 |
+
*,
|
264 |
+
remainder="drop",
|
265 |
+
sparse_threshold=0.3,
|
266 |
+
n_jobs=None,
|
267 |
+
transformer_weights=None,
|
268 |
+
verbose=False,
|
269 |
+
verbose_feature_names_out=True,
|
270 |
+
):
|
271 |
+
self.transformers = transformers
|
272 |
+
self.remainder = remainder
|
273 |
+
self.sparse_threshold = sparse_threshold
|
274 |
+
self.n_jobs = n_jobs
|
275 |
+
self.transformer_weights = transformer_weights
|
276 |
+
self.verbose = verbose
|
277 |
+
self.verbose_feature_names_out = verbose_feature_names_out
|
278 |
+
|
279 |
+
@property
|
280 |
+
def _transformers(self):
|
281 |
+
"""
|
282 |
+
Internal list of transformer only containing the name and
|
283 |
+
transformers, dropping the columns.
|
284 |
+
|
285 |
+
DO NOT USE: This is for the implementation of get_params via
|
286 |
+
BaseComposition._get_params which expects lists of tuples of len 2.
|
287 |
+
|
288 |
+
To iterate through the transformers, use ``self._iter`` instead.
|
289 |
+
"""
|
290 |
+
try:
|
291 |
+
return [(name, trans) for name, trans, _ in self.transformers]
|
292 |
+
except (TypeError, ValueError):
|
293 |
+
return self.transformers
|
294 |
+
|
295 |
+
@_transformers.setter
|
296 |
+
def _transformers(self, value):
|
297 |
+
"""DO NOT USE: This is for the implementation of set_params via
|
298 |
+
BaseComposition._get_params which gives lists of tuples of len 2.
|
299 |
+
"""
|
300 |
+
try:
|
301 |
+
self.transformers = [
|
302 |
+
(name, trans, col)
|
303 |
+
for ((name, trans), (_, _, col)) in zip(value, self.transformers)
|
304 |
+
]
|
305 |
+
except (TypeError, ValueError):
|
306 |
+
self.transformers = value
|
307 |
+
|
308 |
+
def set_output(self, *, transform=None):
|
309 |
+
"""Set the output container when `"transform"` and `"fit_transform"` are called.
|
310 |
+
|
311 |
+
Calling `set_output` will set the output of all estimators in `transformers`
|
312 |
+
and `transformers_`.
|
313 |
+
|
314 |
+
Parameters
|
315 |
+
----------
|
316 |
+
transform : {"default", "pandas"}, default=None
|
317 |
+
Configure output of `transform` and `fit_transform`.
|
318 |
+
|
319 |
+
- `"default"`: Default output format of a transformer
|
320 |
+
- `"pandas"`: DataFrame output
|
321 |
+
- `"polars"`: Polars output
|
322 |
+
- `None`: Transform configuration is unchanged
|
323 |
+
|
324 |
+
.. versionadded:: 1.4
|
325 |
+
`"polars"` option was added.
|
326 |
+
|
327 |
+
Returns
|
328 |
+
-------
|
329 |
+
self : estimator instance
|
330 |
+
Estimator instance.
|
331 |
+
"""
|
332 |
+
super().set_output(transform=transform)
|
333 |
+
|
334 |
+
transformers = (
|
335 |
+
trans
|
336 |
+
for _, trans, _ in chain(
|
337 |
+
self.transformers, getattr(self, "transformers_", [])
|
338 |
+
)
|
339 |
+
if trans not in {"passthrough", "drop"}
|
340 |
+
)
|
341 |
+
for trans in transformers:
|
342 |
+
_safe_set_output(trans, transform=transform)
|
343 |
+
|
344 |
+
if self.remainder not in {"passthrough", "drop"}:
|
345 |
+
_safe_set_output(self.remainder, transform=transform)
|
346 |
+
|
347 |
+
return self
|
348 |
+
|
349 |
+
def get_params(self, deep=True):
|
350 |
+
"""Get parameters for this estimator.
|
351 |
+
|
352 |
+
Returns the parameters given in the constructor as well as the
|
353 |
+
estimators contained within the `transformers` of the
|
354 |
+
`ColumnTransformer`.
|
355 |
+
|
356 |
+
Parameters
|
357 |
+
----------
|
358 |
+
deep : bool, default=True
|
359 |
+
If True, will return the parameters for this estimator and
|
360 |
+
contained subobjects that are estimators.
|
361 |
+
|
362 |
+
Returns
|
363 |
+
-------
|
364 |
+
params : dict
|
365 |
+
Parameter names mapped to their values.
|
366 |
+
"""
|
367 |
+
return self._get_params("_transformers", deep=deep)
|
368 |
+
|
369 |
+
def set_params(self, **kwargs):
|
370 |
+
"""Set the parameters of this estimator.
|
371 |
+
|
372 |
+
Valid parameter keys can be listed with ``get_params()``. Note that you
|
373 |
+
can directly set the parameters of the estimators contained in
|
374 |
+
`transformers` of `ColumnTransformer`.
|
375 |
+
|
376 |
+
Parameters
|
377 |
+
----------
|
378 |
+
**kwargs : dict
|
379 |
+
Estimator parameters.
|
380 |
+
|
381 |
+
Returns
|
382 |
+
-------
|
383 |
+
self : ColumnTransformer
|
384 |
+
This estimator.
|
385 |
+
"""
|
386 |
+
self._set_params("_transformers", **kwargs)
|
387 |
+
return self
|
388 |
+
|
389 |
+
def _iter(self, fitted, column_as_labels, skip_drop, skip_empty_columns):
|
390 |
+
"""
|
391 |
+
Generate (name, trans, column, weight) tuples.
|
392 |
+
|
393 |
+
|
394 |
+
Parameters
|
395 |
+
----------
|
396 |
+
fitted : bool
|
397 |
+
If True, use the fitted transformers (``self.transformers_``) to
|
398 |
+
iterate through transformers, else use the transformers passed by
|
399 |
+
the user (``self.transformers``).
|
400 |
+
|
401 |
+
column_as_labels : bool
|
402 |
+
If True, columns are returned as string labels. If False, columns
|
403 |
+
are returned as they were given by the user. This can only be True
|
404 |
+
if the ``ColumnTransformer`` is already fitted.
|
405 |
+
|
406 |
+
skip_drop : bool
|
407 |
+
If True, 'drop' transformers are filtered out.
|
408 |
+
|
409 |
+
skip_empty_columns : bool
|
410 |
+
If True, transformers with empty selected columns are filtered out.
|
411 |
+
|
412 |
+
Yields
|
413 |
+
------
|
414 |
+
A generator of tuples containing:
|
415 |
+
- name : the name of the transformer
|
416 |
+
- transformer : the transformer object
|
417 |
+
- columns : the columns for that transformer
|
418 |
+
- weight : the weight of the transformer
|
419 |
+
"""
|
420 |
+
if fitted:
|
421 |
+
transformers = self.transformers_
|
422 |
+
else:
|
423 |
+
# interleave the validated column specifiers
|
424 |
+
transformers = [
|
425 |
+
(name, trans, column)
|
426 |
+
for (name, trans, _), column in zip(self.transformers, self._columns)
|
427 |
+
]
|
428 |
+
# add transformer tuple for remainder
|
429 |
+
if self._remainder[2]:
|
430 |
+
transformers = chain(transformers, [self._remainder])
|
431 |
+
get_weight = (self.transformer_weights or {}).get
|
432 |
+
|
433 |
+
for name, trans, columns in transformers:
|
434 |
+
if skip_drop and trans == "drop":
|
435 |
+
continue
|
436 |
+
if skip_empty_columns and _is_empty_column_selection(columns):
|
437 |
+
continue
|
438 |
+
|
439 |
+
if column_as_labels:
|
440 |
+
# Convert all columns to using their string labels
|
441 |
+
columns_is_scalar = np.isscalar(columns)
|
442 |
+
|
443 |
+
indices = self._transformer_to_input_indices[name]
|
444 |
+
columns = self.feature_names_in_[indices]
|
445 |
+
|
446 |
+
if columns_is_scalar:
|
447 |
+
# selection is done with one dimension
|
448 |
+
columns = columns[0]
|
449 |
+
|
450 |
+
yield (name, trans, columns, get_weight(name))
|
451 |
+
|
452 |
+
def _validate_transformers(self):
|
453 |
+
"""Validate names of transformers and the transformers themselves.
|
454 |
+
|
455 |
+
This checks whether given transformers have the required methods, i.e.
|
456 |
+
`fit` or `fit_transform` and `transform` implemented.
|
457 |
+
"""
|
458 |
+
if not self.transformers:
|
459 |
+
return
|
460 |
+
|
461 |
+
names, transformers, _ = zip(*self.transformers)
|
462 |
+
|
463 |
+
# validate names
|
464 |
+
self._validate_names(names)
|
465 |
+
|
466 |
+
# validate estimators
|
467 |
+
for t in transformers:
|
468 |
+
if t in ("drop", "passthrough"):
|
469 |
+
continue
|
470 |
+
if not (hasattr(t, "fit") or hasattr(t, "fit_transform")) or not hasattr(
|
471 |
+
t, "transform"
|
472 |
+
):
|
473 |
+
# Used to validate the transformers in the `transformers` list
|
474 |
+
raise TypeError(
|
475 |
+
"All estimators should implement fit and "
|
476 |
+
"transform, or can be 'drop' or 'passthrough' "
|
477 |
+
"specifiers. '%s' (type %s) doesn't." % (t, type(t))
|
478 |
+
)
|
479 |
+
|
480 |
+
def _validate_column_callables(self, X):
|
481 |
+
"""
|
482 |
+
Converts callable column specifications.
|
483 |
+
|
484 |
+
This stores a dictionary of the form `{step_name: column_indices}` and
|
485 |
+
calls the `columns` on `X` if `columns` is a callable for a given
|
486 |
+
transformer.
|
487 |
+
|
488 |
+
The results are then stored in `self._transformer_to_input_indices`.
|
489 |
+
"""
|
490 |
+
all_columns = []
|
491 |
+
transformer_to_input_indices = {}
|
492 |
+
for name, _, columns in self.transformers:
|
493 |
+
if callable(columns):
|
494 |
+
columns = columns(X)
|
495 |
+
all_columns.append(columns)
|
496 |
+
transformer_to_input_indices[name] = _get_column_indices(X, columns)
|
497 |
+
|
498 |
+
self._columns = all_columns
|
499 |
+
self._transformer_to_input_indices = transformer_to_input_indices
|
500 |
+
|
501 |
+
def _validate_remainder(self, X):
|
502 |
+
"""
|
503 |
+
Validates ``remainder`` and defines ``_remainder`` targeting
|
504 |
+
the remaining columns.
|
505 |
+
"""
|
506 |
+
cols = set(chain(*self._transformer_to_input_indices.values()))
|
507 |
+
remaining = sorted(set(range(self.n_features_in_)) - cols)
|
508 |
+
self._remainder = ("remainder", self.remainder, remaining)
|
509 |
+
self._transformer_to_input_indices["remainder"] = remaining
|
510 |
+
|
511 |
+
@property
|
512 |
+
def named_transformers_(self):
|
513 |
+
"""Access the fitted transformer by name.
|
514 |
+
|
515 |
+
Read-only attribute to access any transformer by given name.
|
516 |
+
Keys are transformer names and values are the fitted transformer
|
517 |
+
objects.
|
518 |
+
"""
|
519 |
+
# Use Bunch object to improve autocomplete
|
520 |
+
return Bunch(**{name: trans for name, trans, _ in self.transformers_})
|
521 |
+
|
522 |
+
def _get_feature_name_out_for_transformer(self, name, trans, feature_names_in):
|
523 |
+
"""Gets feature names of transformer.
|
524 |
+
|
525 |
+
Used in conjunction with self._iter(fitted=True) in get_feature_names_out.
|
526 |
+
"""
|
527 |
+
column_indices = self._transformer_to_input_indices[name]
|
528 |
+
names = feature_names_in[column_indices]
|
529 |
+
# An actual transformer
|
530 |
+
if not hasattr(trans, "get_feature_names_out"):
|
531 |
+
raise AttributeError(
|
532 |
+
f"Transformer {name} (type {type(trans).__name__}) does "
|
533 |
+
"not provide get_feature_names_out."
|
534 |
+
)
|
535 |
+
return trans.get_feature_names_out(names)
|
536 |
+
|
537 |
+
def get_feature_names_out(self, input_features=None):
|
538 |
+
"""Get output feature names for transformation.
|
539 |
+
|
540 |
+
Parameters
|
541 |
+
----------
|
542 |
+
input_features : array-like of str or None, default=None
|
543 |
+
Input features.
|
544 |
+
|
545 |
+
- If `input_features` is `None`, then `feature_names_in_` is
|
546 |
+
used as feature names in. If `feature_names_in_` is not defined,
|
547 |
+
then the following input feature names are generated:
|
548 |
+
`["x0", "x1", ..., "x(n_features_in_ - 1)"]`.
|
549 |
+
- If `input_features` is an array-like, then `input_features` must
|
550 |
+
match `feature_names_in_` if `feature_names_in_` is defined.
|
551 |
+
|
552 |
+
Returns
|
553 |
+
-------
|
554 |
+
feature_names_out : ndarray of str objects
|
555 |
+
Transformed feature names.
|
556 |
+
"""
|
557 |
+
check_is_fitted(self)
|
558 |
+
input_features = _check_feature_names_in(self, input_features)
|
559 |
+
|
560 |
+
# List of tuples (name, feature_names_out)
|
561 |
+
transformer_with_feature_names_out = []
|
562 |
+
for name, trans, *_ in self._iter(
|
563 |
+
fitted=True,
|
564 |
+
column_as_labels=False,
|
565 |
+
skip_empty_columns=True,
|
566 |
+
skip_drop=True,
|
567 |
+
):
|
568 |
+
feature_names_out = self._get_feature_name_out_for_transformer(
|
569 |
+
name, trans, input_features
|
570 |
+
)
|
571 |
+
if feature_names_out is None:
|
572 |
+
continue
|
573 |
+
transformer_with_feature_names_out.append((name, feature_names_out))
|
574 |
+
|
575 |
+
if not transformer_with_feature_names_out:
|
576 |
+
# No feature names
|
577 |
+
return np.array([], dtype=object)
|
578 |
+
|
579 |
+
return self._add_prefix_for_feature_names_out(
|
580 |
+
transformer_with_feature_names_out
|
581 |
+
)
|
582 |
+
|
583 |
+
    def _add_prefix_for_feature_names_out(self, transformer_with_feature_names_out):
        """Add prefix for feature names out that includes the transformer names.

        Parameters
        ----------
        transformer_with_feature_names_out : list of tuples of (str, array-like of str)
            Each tuple consists of a transformer's name and its feature names out.

        Returns
        -------
        feature_names_out : ndarray of shape (n_features,), dtype=str
            Transformed feature names.
        """
        if self.verbose_feature_names_out:
            # Prefix the feature names out with the transformer's name
            names = list(
                chain.from_iterable(
                    (f"{name}__{i}" for i in feature_names_out)
                    for name, feature_names_out in transformer_with_feature_names_out
                )
            )
            return np.asarray(names, dtype=object)

        # verbose_feature_names_out is False
        # Check that names are all unique without a prefix
        feature_names_count = Counter(
            chain.from_iterable(s for _, s in transformer_with_feature_names_out)
        )
        top_6_overlap = [
            name for name, count in feature_names_count.most_common(6) if count > 1
        ]
        top_6_overlap.sort()
        if top_6_overlap:
            if len(top_6_overlap) == 6:
                # There are more than 5 overlapping names, so we only show the
                # first 5 of the feature names
                names_repr = str(top_6_overlap[:5])[:-1] + ", ...]"
            else:
                names_repr = str(top_6_overlap)
            raise ValueError(
                f"Output feature names: {names_repr} are not unique. Please set "
                "verbose_feature_names_out=True to add prefixes to feature names"
            )

        return np.concatenate(
            [name for _, name in transformer_with_feature_names_out],
        )

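    # Illustrative sketch (not part of the original source): with
    # verbose_feature_names_out=False, overlapping output names raise at
    # get_feature_names_out. The transformers below are assumptions:
    #
    #     >>> import numpy as np
    #     >>> from sklearn.compose import ColumnTransformer
    #     >>> from sklearn.preprocessing import FunctionTransformer
    #     >>> ident = FunctionTransformer(feature_names_out="one-to-one")
    #     >>> ct = ColumnTransformer(
    #     ...     [("a", ident, [0]), ("b", ident, [0])],
    #     ...     verbose_feature_names_out=False,
    #     ... )
    #     >>> _ = ct.fit(np.zeros((2, 1)))  # both transformers emit "x0"
    #     >>> ct.get_feature_names_out()  # raises ValueError: names not unique
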
    def _update_fitted_transformers(self, transformers):
        """Set self.transformers_ from given transformers.

        Parameters
        ----------
        transformers : list of estimators
            The fitted estimators as the output of
            `self._call_func_on_transformers(func=_fit_transform_one, ...)`.
            That function doesn't include 'drop' or transformers for which no
            column is selected. 'drop' is kept as is, and for the no-column
            transformers the unfitted transformer is put in
            `self.transformers_`.
        """
        # transformers are fitted; excludes 'drop' cases
        fitted_transformers = iter(transformers)
        transformers_ = []

        for name, old, column, _ in self._iter(
            fitted=False,
            column_as_labels=False,
            skip_drop=False,
            skip_empty_columns=False,
        ):
            if old == "drop":
                trans = "drop"
            elif _is_empty_column_selection(column):
                trans = old
            else:
                trans = next(fitted_transformers)
            transformers_.append((name, trans, column))

        # sanity check that transformers is exhausted
        assert not list(fitted_transformers)
        self.transformers_ = transformers_

    def _validate_output(self, result):
        """
        Ensure that the output of each transformer is 2D. Otherwise
        hstack can raise an error or produce incorrect results.
        """
        names = [
            name
            for name, _, _, _ in self._iter(
                fitted=True,
                column_as_labels=False,
                skip_drop=True,
                skip_empty_columns=True,
            )
        ]
        for Xs, name in zip(result, names):
            if not getattr(Xs, "ndim", 0) == 2 and not hasattr(Xs, "__dataframe__"):
                raise ValueError(
                    "The output of the '{0}' transformer should be 2D (numpy array, "
                    "scipy sparse array, dataframe).".format(name)
                )
        if _get_output_config("transform", self)["dense"] == "pandas":
            return
        try:
            import pandas as pd
        except ImportError:
            return
        for Xs, name in zip(result, names):
            if not _is_pandas_df(Xs):
                continue
            for col_name, dtype in Xs.dtypes.to_dict().items():
                if getattr(dtype, "na_value", None) is not pd.NA:
                    continue
                if pd.NA not in Xs[col_name].values:
                    continue
                class_name = self.__class__.__name__
                # TODO(1.6): replace warning with ValueError
                warnings.warn(
                    (
                        f"The output of the '{name}' transformer for column"
                        f" '{col_name}' has dtype {dtype} and uses pandas.NA to"
                        " represent null values. Storing this output in a numpy array"
                        " can cause errors in downstream scikit-learn estimators, and"
                        " inefficiencies. Starting with scikit-learn version 1.6, this"
                        " will raise a ValueError. To avoid this problem you can (i)"
                        " store the output in a pandas DataFrame by using"
                        f" {class_name}.set_output(transform='pandas') or (ii) modify"
                        f" the input data or the '{name}' transformer to avoid the"
                        " presence of pandas.NA (for example by using"
                        " pandas.DataFrame.astype)."
                    ),
                    FutureWarning,
                )

    def _record_output_indices(self, Xs):
        """
        Record which transformer produced which column.
        """
        idx = 0
        self.output_indices_ = {}

        for transformer_idx, (name, _, _, _) in enumerate(
            self._iter(
                fitted=True,
                column_as_labels=False,
                skip_drop=True,
                skip_empty_columns=True,
            )
        ):
            n_columns = Xs[transformer_idx].shape[1]
            self.output_indices_[name] = slice(idx, idx + n_columns)
            idx += n_columns

        # `_iter` only generates transformers that have a non-empty
        # selection. Here we set empty slices for transformers that
        # generate no output, which are safe for indexing
        all_names = [t[0] for t in self.transformers] + ["remainder"]
        for name in all_names:
            if name not in self.output_indices_:
                self.output_indices_[name] = slice(0, 0)

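    # Illustrative sketch (not part of the original source): after fitting,
    # `output_indices_` maps each transformer name to the slice of columns it
    # produced in the stacked output. The setup below is an assumption:
    #
    #     >>> import numpy as np
    #     >>> from sklearn.compose import ColumnTransformer
    #     >>> from sklearn.preprocessing import Normalizer, StandardScaler
    #     >>> X = np.random.rand(4, 3)
    #     >>> ct = ColumnTransformer(
    #     ...     [("norm", Normalizer(), [0, 1]), ("scale", StandardScaler(), [2])]
    #     ... )
    #     >>> Xt = ct.fit_transform(X)
    #     >>> Xt[:, ct.output_indices_["scale"]].shape  # slice(2, 3)
    #     (4, 1)
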
    def _log_message(self, name, idx, total):
        if not self.verbose:
            return None
        return "(%d of %d) Processing %s" % (idx, total, name)

    def _call_func_on_transformers(self, X, y, func, column_as_labels, routed_params):
        """
        Private function to fit and/or transform on demand.

        Parameters
        ----------
        X : {array-like, dataframe} of shape (n_samples, n_features)
            The data to be used in fit and/or transform.

        y : array-like of shape (n_samples,)
            Targets.

        func : callable
            Function to call, which can be _fit_transform_one or
            _transform_one.

        column_as_labels : bool
            Used to iterate through transformers. If True, columns are returned
            as strings. If False, columns are returned as they were given by
            the user. Can be True only if the ``ColumnTransformer`` is already
            fitted.

        routed_params : dict
            The routed parameters as the output from ``process_routing``.

        Returns
        -------
        Return value (transformers and/or transformed X data) depends
        on the passed function.
        """
        if func is _fit_transform_one:
            fitted = False
        else:  # func is _transform_one
            fitted = True

        transformers = list(
            self._iter(
                fitted=fitted,
                column_as_labels=column_as_labels,
                skip_drop=True,
                skip_empty_columns=True,
            )
        )
        try:
            jobs = []
            for idx, (name, trans, column, weight) in enumerate(transformers, start=1):
                if func is _fit_transform_one:
                    if trans == "passthrough":
                        output_config = _get_output_config("transform", self)
                        trans = FunctionTransformer(
                            accept_sparse=True,
                            check_inverse=False,
                            feature_names_out="one-to-one",
                        ).set_output(transform=output_config["dense"])

                    extra_args = dict(
                        message_clsname="ColumnTransformer",
                        message=self._log_message(name, idx, len(transformers)),
                    )
                else:  # func is _transform_one
                    extra_args = {}
                jobs.append(
                    delayed(func)(
                        transformer=clone(trans) if not fitted else trans,
                        X=_safe_indexing(X, column, axis=1),
                        y=y,
                        weight=weight,
                        **extra_args,
                        params=routed_params[name],
                    )
                )

            return Parallel(n_jobs=self.n_jobs)(jobs)

        except ValueError as e:
            if "Expected 2D array, got 1D array instead" in str(e):
                raise ValueError(_ERR_MSG_1DCOLUMN) from e
            else:
                raise

    def fit(self, X, y=None, **params):
        """Fit all transformers using X.

        Parameters
        ----------
        X : {array-like, dataframe} of shape (n_samples, n_features)
            Input data, of which specified subsets are used to fit the
            transformers.

        y : array-like of shape (n_samples,...), default=None
            Targets for supervised learning.

        **params : dict, default=None
            Parameters to be passed to the underlying transformers' ``fit`` and
            ``transform`` methods.

            You can only pass this if metadata routing is enabled, which you
            can enable using ``sklearn.set_config(enable_metadata_routing=True)``.

            .. versionadded:: 1.4

        Returns
        -------
        self : ColumnTransformer
            This estimator.
        """
        _raise_for_params(params, self, "fit")
        # we use fit_transform to make sure to set sparse_output_ (for which we
        # need the transformed data) to have consistent output type in predict
        self.fit_transform(X, y=y, **params)
        return self

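    # Illustrative sketch (not part of the original source): fit() delegates
    # to fit_transform() and discards the transformed output, so the estimator
    # is fitted in place and returns itself:
    #
    #     >>> import numpy as np
    #     >>> from sklearn.compose import ColumnTransformer
    #     >>> from sklearn.preprocessing import StandardScaler
    #     >>> X = np.arange(6.0).reshape(3, 2)
    #     >>> ct = ColumnTransformer([("scale", StandardScaler(), [0])])
    #     >>> ct.fit(X) is ct
    #     True
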
    @_fit_context(
        # estimators in ColumnTransformer.transformers are not validated yet
        prefer_skip_nested_validation=False
    )
    def fit_transform(self, X, y=None, **params):
        """Fit all transformers, transform the data and concatenate results.

        Parameters
        ----------
        X : {array-like, dataframe} of shape (n_samples, n_features)
            Input data, of which specified subsets are used to fit the
            transformers.

        y : array-like of shape (n_samples,), default=None
            Targets for supervised learning.

        **params : dict, default=None
            Parameters to be passed to the underlying transformers' ``fit`` and
            ``transform`` methods.

            You can only pass this if metadata routing is enabled, which you
            can enable using ``sklearn.set_config(enable_metadata_routing=True)``.

            .. versionadded:: 1.4

        Returns
        -------
        X_t : {array-like, sparse matrix} of \
                shape (n_samples, sum_n_components)
            Horizontally stacked results of transformers. sum_n_components is the
            sum of n_components (output dimension) over transformers. If
            any result is a sparse matrix, everything will be converted to
            sparse matrices.
        """
        _raise_for_params(params, self, "fit_transform")
        self._check_feature_names(X, reset=True)

        X = _check_X(X)
        # set n_features_in_ attribute
        self._check_n_features(X, reset=True)
        self._validate_transformers()
        n_samples = _num_samples(X)

        self._validate_column_callables(X)
        self._validate_remainder(X)

        if _routing_enabled():
            routed_params = process_routing(self, "fit_transform", **params)
        else:
            routed_params = self._get_empty_routing()

        result = self._call_func_on_transformers(
            X,
            y,
            _fit_transform_one,
            column_as_labels=False,
            routed_params=routed_params,
        )

        if not result:
            self._update_fitted_transformers([])
            # All transformers are None
            return np.zeros((n_samples, 0))

        Xs, transformers = zip(*result)

        # determine if concatenated output will be sparse or not
        if any(sparse.issparse(X) for X in Xs):
            nnz = sum(X.nnz if sparse.issparse(X) else X.size for X in Xs)
            total = sum(
                X.shape[0] * X.shape[1] if sparse.issparse(X) else X.size for X in Xs
            )
            density = nnz / total
            self.sparse_output_ = density < self.sparse_threshold
        else:
            self.sparse_output_ = False

        self._update_fitted_transformers(transformers)
        self._validate_output(Xs)
        self._record_output_indices(Xs)

        return self._hstack(list(Xs), n_samples=n_samples)

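    # Illustrative sketch (not part of the original source): when any block is
    # sparse, the overall density is compared to `sparse_threshold` to decide
    # the stacked format. The encoder and data below are assumptions:
    #
    #     >>> import numpy as np
    #     >>> from sklearn.compose import ColumnTransformer
    #     >>> from sklearn.preprocessing import OneHotEncoder
    #     >>> X = np.array([["a"], ["b"], ["a"]], dtype=object)
    #     >>> ct = ColumnTransformer(
    #     ...     [("ohe", OneHotEncoder(), [0])], sparse_threshold=0.3
    #     ... )
    #     >>> Xt = ct.fit_transform(X)  # one-hot block has density 0.5
    #     >>> ct.sparse_output_  # 0.5 >= 0.3, so the result is kept dense
    #     False
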
    def transform(self, X, **params):
        """Transform X separately by each transformer, concatenate results.

        Parameters
        ----------
        X : {array-like, dataframe} of shape (n_samples, n_features)
            The data to be transformed by subset.

        **params : dict, default=None
            Parameters to be passed to the underlying transformers' ``transform``
            method.

            You can only pass this if metadata routing is enabled, which you
            can enable using ``sklearn.set_config(enable_metadata_routing=True)``.

            .. versionadded:: 1.4

        Returns
        -------
        X_t : {array-like, sparse matrix} of \
                shape (n_samples, sum_n_components)
            Horizontally stacked results of transformers. sum_n_components is the
            sum of n_components (output dimension) over transformers. If
            any result is a sparse matrix, everything will be converted to
            sparse matrices.
        """
        _raise_for_params(params, self, "transform")
        check_is_fitted(self)
        X = _check_X(X)

        # If ColumnTransformer is fit using a dataframe, and now a dataframe is
        # passed to be transformed, we select columns by name instead. This
        # enables the user to pass X at transform time with extra columns which
        # were not present at fit time, and the order of the columns doesn't
        # matter.
        fit_dataframe_and_transform_dataframe = hasattr(self, "feature_names_in_") and (
            _is_pandas_df(X) or hasattr(X, "__dataframe__")
        )

        n_samples = _num_samples(X)
        column_names = _get_feature_names(X)

        if fit_dataframe_and_transform_dataframe:
            named_transformers = self.named_transformers_
            # check that all names seen in fit are in transform, unless
            # they were dropped
            non_dropped_indices = [
                ind
                for name, ind in self._transformer_to_input_indices.items()
                if name in named_transformers and named_transformers[name] != "drop"
            ]

            all_indices = set(chain(*non_dropped_indices))
            all_names = set(self.feature_names_in_[ind] for ind in all_indices)

            diff = all_names - set(column_names)
            if diff:
                raise ValueError(f"columns are missing: {diff}")
        else:
            # ndarray was used for fitting or transforming, thus we only
            # check that n_features_in_ is consistent
            self._check_n_features(X, reset=False)

        if _routing_enabled():
            routed_params = process_routing(self, "transform", **params)
        else:
            routed_params = self._get_empty_routing()

        Xs = self._call_func_on_transformers(
            X,
            None,
            _transform_one,
            column_as_labels=fit_dataframe_and_transform_dataframe,
            routed_params=routed_params,
        )
        self._validate_output(Xs)

        if not Xs:
            # All transformers are None
            return np.zeros((n_samples, 0))

        return self._hstack(list(Xs), n_samples=n_samples)

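    # Illustrative sketch (not part of the original source): when both fit and
    # transform receive DataFrames, columns are matched by name, so extra or
    # reordered columns at transform time are tolerated. Requires pandas; the
    # column names are assumptions:
    #
    #     >>> import pandas as pd
    #     >>> from sklearn.compose import ColumnTransformer
    #     >>> from sklearn.preprocessing import StandardScaler
    #     >>> ct = ColumnTransformer([("scale", StandardScaler(), ["a"])])
    #     >>> _ = ct.fit(pd.DataFrame({"a": [0.0, 2.0]}))
    #     >>> ct.transform(pd.DataFrame({"extra": [9, 9], "a": [0.0, 2.0]}))
    #     array([[-1.],
    #            [ 1.]])
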
    def _hstack(self, Xs, *, n_samples):
        """Stacks Xs horizontally.

        This allows subclasses to control the stacking behavior, while reusing
        everything else from ColumnTransformer.

        Parameters
        ----------
        Xs : list of {array-like, sparse matrix, dataframe}
            The containers to concatenate.
        n_samples : int
            The number of samples in the input data, used to check the
            consistency of the transformed output.
        """
        if self.sparse_output_:
            try:
                # since all columns should be numeric before stacking them
                # in a sparse matrix, `check_array` is used for the
                # dtype conversion if necessary.
                converted_Xs = [
                    check_array(X, accept_sparse=True, force_all_finite=False)
                    for X in Xs
                ]
            except ValueError as e:
                raise ValueError(
                    "For a sparse output, all columns should "
                    "be numeric or convertible to numeric."
                ) from e

            return sparse.hstack(converted_Xs).tocsr()
        else:
            Xs = [f.toarray() if sparse.issparse(f) else f for f in Xs]
            adapter = _get_container_adapter("transform", self)
            if adapter and all(adapter.is_supported_container(X) for X in Xs):
                # rename before stacking, as it avoids errors on temporarily
                # duplicated columns
                transformer_names = [
                    t[0]
                    for t in self._iter(
                        fitted=True,
                        column_as_labels=False,
                        skip_drop=True,
                        skip_empty_columns=True,
                    )
                ]
                feature_names_outs = [X.columns for X in Xs if X.shape[1] != 0]
                if self.verbose_feature_names_out:
                    # `_add_prefix_for_feature_names_out` takes care of raising
                    # an error if there are duplicated columns.
                    feature_names_outs = self._add_prefix_for_feature_names_out(
                        list(zip(transformer_names, feature_names_outs))
                    )
                else:
                    # check for duplicated columns and raise if any
                    feature_names_outs = list(chain.from_iterable(feature_names_outs))
                    feature_names_count = Counter(feature_names_outs)
                    if any(count > 1 for count in feature_names_count.values()):
                        duplicated_feature_names = sorted(
                            name
                            for name, count in feature_names_count.items()
                            if count > 1
                        )
                        err_msg = (
                            "Duplicated feature names found before concatenating the"
                            " outputs of the transformers:"
                            f" {duplicated_feature_names}.\n"
                        )
                        for transformer_name, X in zip(transformer_names, Xs):
                            if X.shape[1] == 0:
                                continue
                            dup_cols_in_transformer = sorted(
                                set(X.columns).intersection(duplicated_feature_names)
                            )
                            if len(dup_cols_in_transformer):
                                err_msg += (
                                    f"Transformer {transformer_name} has conflicting "
                                    f"column names: {dup_cols_in_transformer}.\n"
                                )
                        raise ValueError(
                            err_msg
                            + "Either make sure that the transformers named above "
                            "do not generate columns with conflicting names or set "
                            "verbose_feature_names_out=True to automatically "
                            "prefix the output feature names with the name "
                            "of the transformer to prevent any conflicting "
                            "names."
                        )

                names_idx = 0
                for X in Xs:
                    if X.shape[1] == 0:
                        continue
                    names_out = feature_names_outs[names_idx : names_idx + X.shape[1]]
                    adapter.rename_columns(X, names_out)
                    names_idx += X.shape[1]

                output = adapter.hstack(Xs)
                output_samples = output.shape[0]
                if output_samples != n_samples:
                    raise ValueError(
                        "Concatenating DataFrames from the transformers' output"
                        " leads to an inconsistent number of samples. The output"
                        " may have pandas indexes that do not match, or the"
                        " transformers may be returning a number of samples that"
                        " differs from the number of input samples."
                    )

                return output

            return np.hstack(Xs)

    def _sk_visual_block_(self):
        if isinstance(self.remainder, str) and self.remainder == "drop":
            transformers = self.transformers
        elif hasattr(self, "_remainder"):
            remainder_columns = self._remainder[2]
            if (
                hasattr(self, "feature_names_in_")
                and remainder_columns
                and not all(isinstance(col, str) for col in remainder_columns)
            ):
                remainder_columns = self.feature_names_in_[remainder_columns].tolist()
            transformers = chain(
                self.transformers, [("remainder", self.remainder, remainder_columns)]
            )
        else:
            transformers = chain(self.transformers, [("remainder", self.remainder, "")])

        names, transformers, name_details = zip(*transformers)
        return _VisualBlock(
            "parallel", transformers, names=names, name_details=name_details
        )

    def _get_empty_routing(self):
        """Return empty routing.

        Used while routing can be disabled.

        TODO: Remove when ``set_config(enable_metadata_routing=False)`` is no
        longer an option.
        """
        return Bunch(
            **{
                name: Bunch(**{method: {} for method in METHODS})
                for name, step, _, _ in self._iter(
                    fitted=False,
                    column_as_labels=False,
                    skip_drop=True,
                    skip_empty_columns=True,
                )
            }
        )

    def get_metadata_routing(self):
        """Get metadata routing of this object.

        Please check :ref:`User Guide <metadata_routing>` on how the routing
        mechanism works.

        .. versionadded:: 1.4

        Returns
        -------
        routing : MetadataRouter
            A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating
            routing information.
        """
        router = MetadataRouter(owner=self.__class__.__name__)
        # Here we don't care about which columns are used for which
        # transformers, and whether or not a transformer is used at all, which
        # might happen if no columns are selected for that transformer. We
        # request all metadata requested by all transformers.
        transformers = chain(self.transformers, [("remainder", self.remainder, None)])
        for name, step, _ in transformers:
            method_mapping = MethodMapping()
            if hasattr(step, "fit_transform"):
                (
                    method_mapping.add(caller="fit", callee="fit_transform").add(
                        caller="fit_transform", callee="fit_transform"
                    )
                )
            else:
                (
                    method_mapping.add(caller="fit", callee="fit")
                    .add(caller="fit", callee="transform")
                    .add(caller="fit_transform", callee="fit")
                    .add(caller="fit_transform", callee="transform")
                )
            method_mapping.add(caller="transform", callee="transform")
            router.add(method_mapping=method_mapping, **{name: step})

        return router

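    # Illustrative sketch (not part of the original source): inspecting the
    # router requires nothing special, but routing metadata at fit/transform
    # time requires enabling the feature globally. The transformer below is an
    # assumption:
    #
    #     >>> import sklearn
    #     >>> from sklearn.compose import ColumnTransformer
    #     >>> from sklearn.preprocessing import StandardScaler
    #     >>> sklearn.set_config(enable_metadata_routing=True)
    #     >>> ct = ColumnTransformer([("scale", StandardScaler(), [0])])
    #     >>> routing = ct.get_metadata_routing()  # a MetadataRouter instance
    #     >>> sklearn.set_config(enable_metadata_routing=False)

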
def _check_X(X):
    """Use check_array only when necessary, e.g. on lists and other non-array-likes."""
    if hasattr(X, "__array__") or hasattr(X, "__dataframe__") or sparse.issparse(X):
        return X
    return check_array(X, force_all_finite="allow-nan", dtype=object)


def _is_empty_column_selection(column):
    """
    Return True if the column selection is empty (empty list or all-False
    boolean array).

    """
    if hasattr(column, "dtype") and np.issubdtype(column.dtype, np.bool_):
        return not column.any()
    elif hasattr(column, "__len__"):
        return (
            len(column) == 0
            or all(isinstance(col, bool) for col in column)
            and not any(column)
        )
    else:
        return False


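# Illustrative sketch (not part of the original source): selections that the
# private helper above counts as "empty":
#
#     >>> import numpy as np
#     >>> _is_empty_column_selection([])
#     True
#     >>> _is_empty_column_selection(np.array([False, False]))
#     True
#     >>> _is_empty_column_selection([0, 1])
#     False

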
def _get_transformer_list(estimators):
    """
    Construct (name, trans, column) tuples from a list.

    """
    transformers, columns = zip(*estimators)
    names, _ = zip(*_name_estimators(transformers))

    transformer_list = list(zip(names, transformers, columns))
    return transformer_list


# This function is not validated using validate_params because
# it's just a factory for ColumnTransformer.
def make_column_transformer(
    *transformers,
    remainder="drop",
    sparse_threshold=0.3,
    n_jobs=None,
    verbose=False,
    verbose_feature_names_out=True,
):
    """Construct a ColumnTransformer from the given transformers.

    This is a shorthand for the ColumnTransformer constructor; it does not
    require, and does not permit, naming the transformers. Instead, they will
    be given names automatically based on their types. It also does not allow
    weighting with ``transformer_weights``.

    Read more in the :ref:`User Guide <make_column_transformer>`.

    Parameters
    ----------
    *transformers : tuples
        Tuples of the form (transformer, columns) specifying the
        transformer objects to be applied to subsets of the data.

        transformer : {'drop', 'passthrough'} or estimator
            Estimator must support :term:`fit` and :term:`transform`.
            Special-cased strings 'drop' and 'passthrough' are accepted as
            well, to indicate to drop the columns or to pass them through
            untransformed, respectively.

        columns : str, array-like of str, int, array-like of int, slice, \
                array-like of bool or callable
            Indexes the data on its second axis. Integers are interpreted as
            positional columns, while strings can reference DataFrame columns
            by name. A scalar string or int should be used where
            ``transformer`` expects X to be a 1d array-like (vector),
            otherwise a 2d array will be passed to the transformer.
            A callable is passed the input data `X` and can return any of the
            above. To select multiple columns by name or dtype, you can use
            :obj:`make_column_selector`.

    remainder : {'drop', 'passthrough'} or estimator, default='drop'
        By default, only the specified columns in `transformers` are
        transformed and combined in the output, and the non-specified
        columns are dropped.
        By specifying ``remainder='passthrough'``, all remaining columns that
        were not specified in `transformers` will be automatically passed
        through. This subset of columns is concatenated with the output of
        the transformers.
        By setting ``remainder`` to be an estimator, the remaining
        non-specified columns will use the ``remainder`` estimator. The
        estimator must support :term:`fit` and :term:`transform`.

    sparse_threshold : float, default=0.3
        If the transformed output consists of a mix of sparse and dense data,
        it will be stacked as a sparse matrix if the density is lower than this
        value. Use ``sparse_threshold=0`` to always return dense.
        When the transformed output consists of all sparse or all dense data,
        the stacked result will be sparse or dense, respectively, and this
        keyword will be ignored.

    n_jobs : int, default=None
        Number of jobs to run in parallel.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.

    verbose : bool, default=False
        If True, the time elapsed while fitting each transformer will be
        printed as it is completed.

    verbose_feature_names_out : bool, default=True
        If True, :meth:`ColumnTransformer.get_feature_names_out` will prefix
        all feature names with the name of the transformer that generated that
        feature.
        If False, :meth:`ColumnTransformer.get_feature_names_out` will not
        prefix any feature names and will error if feature names are not
        unique.

        .. versionadded:: 1.0

    Returns
    -------
    ct : ColumnTransformer
        Returns a :class:`ColumnTransformer` object.

    See Also
    --------
    ColumnTransformer : Class that allows combining the
        outputs of multiple transformer objects used on column subsets
        of the data into a single feature space.

    Examples
    --------
    >>> from sklearn.preprocessing import StandardScaler, OneHotEncoder
    >>> from sklearn.compose import make_column_transformer
    >>> make_column_transformer(
    ...     (StandardScaler(), ['numerical_column']),
    ...     (OneHotEncoder(), ['categorical_column']))
    ColumnTransformer(transformers=[('standardscaler', StandardScaler(...),
                                     ['numerical_column']),
                                    ('onehotencoder', OneHotEncoder(...),
                                     ['categorical_column'])])
    """
    # transformer_weights keyword is not passed through because the user
    # would need to know the automatically generated names of the transformers
    transformer_list = _get_transformer_list(transformers)
    return ColumnTransformer(
        transformer_list,
        n_jobs=n_jobs,
        remainder=remainder,
        sparse_threshold=sparse_threshold,
        verbose=verbose,
        verbose_feature_names_out=verbose_feature_names_out,
    )


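# Illustrative sketch (not part of the original source): keeping unlisted
# columns with remainder='passthrough'. Requires pandas; the column names and
# values are assumptions:
#
#     >>> import pandas as pd
#     >>> from sklearn.compose import make_column_transformer
#     >>> from sklearn.preprocessing import StandardScaler
#     >>> ct = make_column_transformer(
#     ...     (StandardScaler(), ["age"]), remainder="passthrough"
#     ... )
#     >>> X = pd.DataFrame({"age": [20.0, 40.0], "city": ["Paris", "London"]})
#     >>> ct.fit_transform(X)  # scaled "age" plus the untouched "city" column
#     array([[-1.0, 'Paris'],
#            [1.0, 'London']], dtype=object)

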
class make_column_selector:
    """Create a callable to select columns to be used with
    :class:`ColumnTransformer`.

    :func:`make_column_selector` can select columns based on datatype or on the
    columns' names with a regex. When using multiple selection criteria, **all**
    criteria must match for a column to be selected.

    For an example of how to use :func:`make_column_selector` within a
    :class:`ColumnTransformer` to select columns based on data type (i.e.
    `dtype`), refer to
    :ref:`sphx_glr_auto_examples_compose_plot_column_transformer_mixed_types.py`.

    Parameters
    ----------
    pattern : str, default=None
        Names of columns containing this regex pattern will be included. If
        None, column selection will not be based on a pattern.

    dtype_include : column dtype or list of column dtypes, default=None
        A selection of dtypes to include. For more details, see
        :meth:`pandas.DataFrame.select_dtypes`.

    dtype_exclude : column dtype or list of column dtypes, default=None
        A selection of dtypes to exclude. For more details, see
        :meth:`pandas.DataFrame.select_dtypes`.

    Returns
    -------
    selector : callable
        Callable for column selection to be used by a
        :class:`ColumnTransformer`.

    See Also
    --------
    ColumnTransformer : Class that allows combining the
        outputs of multiple transformer objects used on column subsets
        of the data into a single feature space.

    Examples
    --------
    >>> from sklearn.preprocessing import StandardScaler, OneHotEncoder
    >>> from sklearn.compose import make_column_transformer
    >>> from sklearn.compose import make_column_selector
    >>> import numpy as np
    >>> import pandas as pd  # doctest: +SKIP
    >>> X = pd.DataFrame({'city': ['London', 'London', 'Paris', 'Sallisaw'],
    ...                   'rating': [5, 3, 4, 5]})  # doctest: +SKIP
    >>> ct = make_column_transformer(
    ...       (StandardScaler(),
    ...        make_column_selector(dtype_include=np.number)),  # rating
    ...       (OneHotEncoder(),
    ...        make_column_selector(dtype_include=object)))  # city
    >>> ct.fit_transform(X)  # doctest: +SKIP
    array([[ 0.90453403,  1.        ,  0.        ,  0.        ],
           [-1.50755672,  1.        ,  0.        ,  0.        ],
           [-0.30151134,  0.        ,  1.        ,  0.        ],
           [ 0.90453403,  0.        ,  0.        ,  1.        ]])
    """

    def __init__(self, pattern=None, *, dtype_include=None, dtype_exclude=None):
        self.pattern = pattern
        self.dtype_include = dtype_include
        self.dtype_exclude = dtype_exclude

    def __call__(self, df):
        """Callable for column selection to be used by a
        :class:`ColumnTransformer`.

        Parameters
        ----------
        df : dataframe of shape (n_samples, n_features)
            DataFrame to select columns from.
        """
        if not hasattr(df, "iloc"):
            raise ValueError(
                "make_column_selector can only be applied to pandas dataframes"
            )
        df_row = df.iloc[:1]
        if self.dtype_include is not None or self.dtype_exclude is not None:
            df_row = df_row.select_dtypes(
                include=self.dtype_include, exclude=self.dtype_exclude
            )
        cols = df_row.columns
        if self.pattern is not None:
            cols = cols[cols.str.contains(self.pattern, regex=True)]
        return cols.tolist()
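
# Illustrative sketch (not part of the original source): combining a regex
# pattern with a dtype filter; both criteria must match. Requires pandas; the
# column names are assumptions:
#
#     >>> import pandas as pd
#     >>> from sklearn.compose import make_column_selector
#     >>> X = pd.DataFrame({"num_a": [1.0], "num_b": [2.0], "cat_a": ["x"]})
#     >>> make_column_selector(pattern="^num_", dtype_include="float64")(X)
#     ['num_a', 'num_b']
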
env-llmeval/lib/python3.10/site-packages/sklearn/compose/_target.py
ADDED
@@ -0,0 +1,342 @@
# Authors: Andreas Mueller <[email protected]>
#          Guillaume Lemaitre <[email protected]>
# License: BSD 3 clause

import warnings

import numpy as np

from ..base import BaseEstimator, RegressorMixin, _fit_context, clone
from ..exceptions import NotFittedError
from ..preprocessing import FunctionTransformer
from ..utils import _safe_indexing, check_array
from ..utils._param_validation import HasMethods
from ..utils._tags import _safe_tags
from ..utils.metadata_routing import (
    _raise_for_unsupported_routing,
    _RoutingNotSupportedMixin,
)
from ..utils.validation import check_is_fitted

__all__ = ["TransformedTargetRegressor"]


class TransformedTargetRegressor(
    _RoutingNotSupportedMixin, RegressorMixin, BaseEstimator
):
    """Meta-estimator to regress on a transformed target.

    Useful for applying a non-linear transformation to the target `y` in
    regression problems. This transformation can be given as a Transformer
    such as the :class:`~sklearn.preprocessing.QuantileTransformer` or as a
    function and its inverse such as `np.log` and `np.exp`.

    The computation during :meth:`fit` is::

        regressor.fit(X, func(y))

    or::

        regressor.fit(X, transformer.transform(y))

    The computation during :meth:`predict` is::

        inverse_func(regressor.predict(X))

    or::

        transformer.inverse_transform(regressor.predict(X))

    Read more in the :ref:`User Guide <transformed_target_regressor>`.

    .. versionadded:: 0.20

    Parameters
    ----------
    regressor : object, default=None
        Regressor object such as derived from
        :class:`~sklearn.base.RegressorMixin`. This regressor will
        automatically be cloned each time prior to fitting. If `regressor is
        None`, :class:`~sklearn.linear_model.LinearRegression` is created and used.

    transformer : object, default=None
        Estimator object such as derived from
        :class:`~sklearn.base.TransformerMixin`. Cannot be set at the same time
        as `func` and `inverse_func`. If `transformer is None` as well as
        `func` and `inverse_func`, the transformer will be an identity
        transformer. Note that the transformer will be cloned during fitting.
        Also, the transformer restricts `y` to be a numpy array.

    func : function, default=None
        Function to apply to `y` before passing to :meth:`fit`. Cannot be set
        at the same time as `transformer`. The function needs to return a
        2-dimensional array. If `func is None`, the function used will be the
        identity function.

    inverse_func : function, default=None
        Function to apply to the prediction of the regressor. Cannot be set at
        the same time as `transformer`. The function needs to return a
        2-dimensional array. The inverse function is used to return
        predictions to the same space as the original training labels.

    check_inverse : bool, default=True
        Whether to check that `transform` followed by `inverse_transform`
        or `func` followed by `inverse_func` leads to the original targets.

    Attributes
    ----------
    regressor_ : object
        Fitted regressor.

    transformer_ : object
        Transformer used in :meth:`fit` and :meth:`predict`.

    n_features_in_ : int
        Number of features seen during :term:`fit`. Only defined if the
        underlying regressor exposes such an attribute when fit.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    See Also
    --------
    sklearn.preprocessing.FunctionTransformer : Construct a transformer from an
        arbitrary callable.

    Notes
    -----
    Internally, the target `y` is always converted into a 2-dimensional array
    to be used by scikit-learn transformers. At the time of prediction, the
    output will be reshaped to have the same number of dimensions as `y`.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.linear_model import LinearRegression
    >>> from sklearn.compose import TransformedTargetRegressor
    >>> tt = TransformedTargetRegressor(regressor=LinearRegression(),
    ...                                 func=np.log, inverse_func=np.exp)
    >>> X = np.arange(4).reshape(-1, 1)
    >>> y = np.exp(2 * X).ravel()
    >>> tt.fit(X, y)
    TransformedTargetRegressor(...)
    >>> tt.score(X, y)
    1.0
    >>> tt.regressor_.coef_
    array([2.])

    For a more detailed example use case refer to
    :ref:`sphx_glr_auto_examples_compose_plot_transformed_target.py`.
    """

    _parameter_constraints: dict = {
        "regressor": [HasMethods(["fit", "predict"]), None],
        "transformer": [HasMethods("transform"), None],
        "func": [callable, None],
        "inverse_func": [callable, None],
        "check_inverse": ["boolean"],
    }

    def __init__(
        self,
        regressor=None,
        *,
        transformer=None,
        func=None,
        inverse_func=None,
        check_inverse=True,
    ):
        self.regressor = regressor
        self.transformer = transformer
        self.func = func
        self.inverse_func = inverse_func
        self.check_inverse = check_inverse

    def _fit_transformer(self, y):
        """Check transformer and fit transformer.

        Create the default transformer, fit it and make an additional inverse
        check on a subset (optional).

        """
        if self.transformer is not None and (
            self.func is not None or self.inverse_func is not None
        ):
            raise ValueError(
                "'transformer' and functions 'func'/'inverse_func' cannot both be set."
            )
        elif self.transformer is not None:
            self.transformer_ = clone(self.transformer)
        else:
            if self.func is not None and self.inverse_func is None:
                raise ValueError(
                    "When 'func' is provided, 'inverse_func' must also be provided"
                )
            self.transformer_ = FunctionTransformer(
                func=self.func,
                inverse_func=self.inverse_func,
                validate=True,
                check_inverse=self.check_inverse,
            )
        # XXX: sample_weight is not currently passed to the
        # transformer. However, if the transformer starts using sample_weight,
        # the code should be modified accordingly. At the time the sample_prop
        # feature is considered, it is also a good use case to keep in mind.
        self.transformer_.fit(y)
        if self.check_inverse:
            idx_selected = slice(None, None, max(1, y.shape[0] // 10))
            y_sel = _safe_indexing(y, idx_selected)
            y_sel_t = self.transformer_.transform(y_sel)
            if not np.allclose(y_sel, self.transformer_.inverse_transform(y_sel_t)):
                warnings.warn(
                    (
                        "The provided functions or transformer are"
                        " not strictly inverse of each other. If"
                        " you are sure you want to proceed regardless"
                        ", set 'check_inverse=False'"
                    ),
                    UserWarning,
                )

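    # Illustrative sketch (not part of the original source): a non-inverse
    # pair triggers the UserWarning emitted above, since np.log1p is not the
    # inverse of np.exp:
    #
    #     >>> import numpy as np
    #     >>> from sklearn.compose import TransformedTargetRegressor
    #     >>> tt = TransformedTargetRegressor(func=np.exp, inverse_func=np.log1p)
    #     >>> X, y = np.arange(8.0).reshape(-1, 1), np.arange(8.0) + 1.0
    #     >>> _ = tt.fit(X, y)  # warns: functions are not strictly inverse
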
    @_fit_context(
        # TransformedTargetRegressor.regressor/transformer are not validated yet.
        prefer_skip_nested_validation=False
    )
    def fit(self, X, y, **fit_params):
        """Fit the model according to the given training data.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Training vector, where `n_samples` is the number of samples and
            `n_features` is the number of features.

        y : array-like of shape (n_samples,)
            Target values.

        **fit_params : dict
            Parameters passed to the `fit` method of the underlying
            regressor.

        Returns
        -------
        self : object
            Fitted estimator.
        """
        _raise_for_unsupported_routing(self, "fit", **fit_params)
        if y is None:
            raise ValueError(
                f"This {self.__class__.__name__} estimator "
                "requires y to be passed, but the target y is None."
            )
        y = check_array(
            y,
            input_name="y",
            accept_sparse=False,
            force_all_finite=True,
            ensure_2d=False,
            dtype="numeric",
            allow_nd=True,
        )

        # store the number of dimensions of the target to predict an array of
        # similar shape at predict time
        self._training_dim = y.ndim

        # transformers are designed to modify X, which is 2-dimensional, so we
        # need to modify y accordingly.
        if y.ndim == 1:
            y_2d = y.reshape(-1, 1)
        else:
            y_2d = y
        self._fit_transformer(y_2d)

        # transform y and convert back to 1d array if needed
        y_trans = self.transformer_.transform(y_2d)
        # FIXME: a FunctionTransformer can return a 1D array even when validate
        # is set to True. Therefore, we need to check the number of dimensions
        # first.
        if y_trans.ndim == 2 and y_trans.shape[1] == 1:
            y_trans = y_trans.squeeze(axis=1)

        if self.regressor is None:
            from ..linear_model import LinearRegression

            self.regressor_ = LinearRegression()
        else:
            self.regressor_ = clone(self.regressor)

        self.regressor_.fit(X, y_trans, **fit_params)

        if hasattr(self.regressor_, "feature_names_in_"):
            self.feature_names_in_ = self.regressor_.feature_names_in_

        return self

    def predict(self, X, **predict_params):
        """Predict using the base regressor, applying inverse.

        The regressor is used to predict and the `inverse_func` or
        `inverse_transform` is applied before returning the prediction.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Samples.

        **predict_params : dict of str -> object
            Parameters passed to the `predict` method of the underlying
            regressor.

        Returns
        -------
        y_hat : ndarray of shape (n_samples,)
            Predicted values.
        """
        check_is_fitted(self)
        pred = self.regressor_.predict(X, **predict_params)
        if pred.ndim == 1:
            pred_trans = self.transformer_.inverse_transform(pred.reshape(-1, 1))
        else:
            pred_trans = self.transformer_.inverse_transform(pred)
        if (
            self._training_dim == 1
            and pred_trans.ndim == 2
            and pred_trans.shape[1] == 1
        ):
            pred_trans = pred_trans.squeeze(axis=1)

        return pred_trans

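    # Illustrative sketch (not part of the original source): using a full
    # transformer instead of func/inverse_func; predictions come back in the
    # original target space with the original shape:
    #
    #     >>> import numpy as np
    #     >>> from sklearn.compose import TransformedTargetRegressor
    #     >>> from sklearn.linear_model import LinearRegression
    #     >>> from sklearn.preprocessing import QuantileTransformer
    #     >>> X = np.arange(100.0).reshape(-1, 1)
    #     >>> y = np.exp(X.ravel() / 25.0)
    #     >>> tt = TransformedTargetRegressor(
    #     ...     regressor=LinearRegression(),
    #     ...     transformer=QuantileTransformer(n_quantiles=10),
    #     ... )
    #     >>> tt.fit(X, y).predict(X).shape  # same shape as y
    #     (100,)
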
    def _more_tags(self):
        regressor = self.regressor
        if regressor is None:
            from ..linear_model import LinearRegression

            regressor = LinearRegression()

        return {
            "poor_score": True,
            "multioutput": _safe_tags(regressor, key="multioutput"),
        }

    @property
    def n_features_in_(self):
        """Number of features seen during :term:`fit`."""
        # For consistency with other estimators we raise an AttributeError so
        # that hasattr() returns False when the estimator isn't fitted.
        try:
            check_is_fitted(self)
        except NotFittedError as nfe:
            raise AttributeError(
                "{} object has no n_features_in_ attribute.".format(
                    self.__class__.__name__
                )
            ) from nfe

        return self.regressor_.n_features_in_

env-llmeval/lib/python3.10/site-packages/sklearn/compose/tests/__init__.py
ADDED
File without changes

env-llmeval/lib/python3.10/site-packages/sklearn/compose/tests/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (186 Bytes).

env-llmeval/lib/python3.10/site-packages/sklearn/compose/tests/__pycache__/test_column_transformer.cpython-310.pyc
ADDED
Binary file (60.6 kB).

env-llmeval/lib/python3.10/site-packages/sklearn/compose/tests/__pycache__/test_target.cpython-310.pyc
ADDED
Binary file (11.9 kB).

env-llmeval/lib/python3.10/site-packages/sklearn/compose/tests/test_column_transformer.py
ADDED
@@ -0,0 +1,2582 @@
+"""
+Test the ColumnTransformer.
+"""
+
+import pickle
+import re
+import warnings
+
+import numpy as np
+import pytest
+from numpy.testing import assert_allclose
+from scipy import sparse
+
+from sklearn.base import BaseEstimator, TransformerMixin
+from sklearn.compose import (
+    ColumnTransformer,
+    make_column_selector,
+    make_column_transformer,
+)
+from sklearn.exceptions import NotFittedError
+from sklearn.feature_selection import VarianceThreshold
+from sklearn.preprocessing import (
+    FunctionTransformer,
+    Normalizer,
+    OneHotEncoder,
+    StandardScaler,
+)
+from sklearn.tests.metadata_routing_common import (
+    ConsumingTransformer,
+    _Registry,
+    check_recorded_metadata,
+)
+from sklearn.utils._testing import (
+    _convert_container,
+    assert_allclose_dense_sparse,
+    assert_almost_equal,
+    assert_array_equal,
+)
+from sklearn.utils.fixes import CSR_CONTAINERS
+
+
+class Trans(TransformerMixin, BaseEstimator):
+    def fit(self, X, y=None):
+        return self
+
+    def transform(self, X, y=None):
+        # 1D Series -> 2D DataFrame
+        if hasattr(X, "to_frame"):
+            return X.to_frame()
+        # 1D array -> 2D array
+        if getattr(X, "ndim", 2) == 1:
+            return np.atleast_2d(X).T
+        return X
+
+
+class DoubleTrans(BaseEstimator):
+    def fit(self, X, y=None):
+        return self
+
+    def transform(self, X):
+        return 2 * X
+
+
+class SparseMatrixTrans(BaseEstimator):
+    def __init__(self, csr_container):
+        self.csr_container = csr_container
+
+    def fit(self, X, y=None):
+        return self
+
+    def transform(self, X, y=None):
+        n_samples = len(X)
+        return self.csr_container(sparse.eye(n_samples, n_samples))
+
+
+class TransNo2D(BaseEstimator):
+    def fit(self, X, y=None):
+        return self
+
+    def transform(self, X, y=None):
+        return X
+
+
+class TransRaise(BaseEstimator):
+    def fit(self, X, y=None):
+        raise ValueError("specific message")
+
+    def transform(self, X, y=None):
+        raise ValueError("specific message")
+
+
+def test_column_transformer():
+    X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
+
+    X_res_first1D = np.array([0, 1, 2])
+    X_res_second1D = np.array([2, 4, 6])
+    X_res_first = X_res_first1D.reshape(-1, 1)
+    X_res_both = X_array
+
+    cases = [
+        # single column 1D / 2D
+        (0, X_res_first),
+        ([0], X_res_first),
+        # list-like
+        ([0, 1], X_res_both),
+        (np.array([0, 1]), X_res_both),
+        # slice
+        (slice(0, 1), X_res_first),
+        (slice(0, 2), X_res_both),
+        # boolean mask
+        (np.array([True, False]), X_res_first),
+        ([True, False], X_res_first),
+        (np.array([True, True]), X_res_both),
+        ([True, True], X_res_both),
+    ]
+
+    for selection, res in cases:
+        ct = ColumnTransformer([("trans", Trans(), selection)], remainder="drop")
+        assert_array_equal(ct.fit_transform(X_array), res)
+        assert_array_equal(ct.fit(X_array).transform(X_array), res)
+
+        # callable that returns any of the allowed specifiers
+        ct = ColumnTransformer(
+            [("trans", Trans(), lambda x: selection)], remainder="drop"
+        )
+        assert_array_equal(ct.fit_transform(X_array), res)
+        assert_array_equal(ct.fit(X_array).transform(X_array), res)
+
+    ct = ColumnTransformer([("trans1", Trans(), [0]), ("trans2", Trans(), [1])])
+    assert_array_equal(ct.fit_transform(X_array), X_res_both)
+    assert_array_equal(ct.fit(X_array).transform(X_array), X_res_both)
+    assert len(ct.transformers_) == 2
+
+    # test with transformer_weights
+    transformer_weights = {"trans1": 0.1, "trans2": 10}
+    both = ColumnTransformer(
+        [("trans1", Trans(), [0]), ("trans2", Trans(), [1])],
+        transformer_weights=transformer_weights,
+    )
+    res = np.vstack(
+        [
+            transformer_weights["trans1"] * X_res_first1D,
+            transformer_weights["trans2"] * X_res_second1D,
+        ]
+    ).T
+    assert_array_equal(both.fit_transform(X_array), res)
+    assert_array_equal(both.fit(X_array).transform(X_array), res)
+    assert len(both.transformers_) == 2
+
+    both = ColumnTransformer(
+        [("trans", Trans(), [0, 1])], transformer_weights={"trans": 0.1}
+    )
+    assert_array_equal(both.fit_transform(X_array), 0.1 * X_res_both)
+    assert_array_equal(both.fit(X_array).transform(X_array), 0.1 * X_res_both)
+    assert len(both.transformers_) == 1
+
+
+def test_column_transformer_tuple_transformers_parameter():
+    X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
+
+    transformers = [("trans1", Trans(), [0]), ("trans2", Trans(), [1])]
+
+    ct_with_list = ColumnTransformer(transformers)
+    ct_with_tuple = ColumnTransformer(tuple(transformers))
+
+    assert_array_equal(
+        ct_with_list.fit_transform(X_array), ct_with_tuple.fit_transform(X_array)
+    )
+    assert_array_equal(
+        ct_with_list.fit(X_array).transform(X_array),
+        ct_with_tuple.fit(X_array).transform(X_array),
+    )
+
+
+@pytest.mark.parametrize("constructor_name", ["dataframe", "polars"])
+def test_column_transformer_dataframe(constructor_name):
+    if constructor_name == "dataframe":
+        dataframe_lib = pytest.importorskip("pandas")
+    else:
+        dataframe_lib = pytest.importorskip(constructor_name)
+
+    X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
+    X_df = _convert_container(
+        X_array, constructor_name, columns_name=["first", "second"]
+    )
+
+    X_res_first = np.array([0, 1, 2]).reshape(-1, 1)
+    X_res_both = X_array
+
+    cases = [
+        # String keys: label based
+        # list
+        (["first"], X_res_first),
+        (["first", "second"], X_res_both),
+        # slice
+        (slice("first", "second"), X_res_both),
+        # int keys: positional
+        # list
+        ([0], X_res_first),
+        ([0, 1], X_res_both),
+        (np.array([0, 1]), X_res_both),
+        # slice
+        (slice(0, 1), X_res_first),
+        (slice(0, 2), X_res_both),
+        # boolean mask
+        (np.array([True, False]), X_res_first),
+        ([True, False], X_res_first),
+    ]
+    if constructor_name == "dataframe":
+        # Scalars are only supported for pandas dataframes.
+        cases.extend(
+            [
+                # scalar
+                (0, X_res_first),
+                ("first", X_res_first),
+                (
+                    dataframe_lib.Series([True, False], index=["first", "second"]),
+                    X_res_first,
+                ),
+            ]
+        )
+
+    for selection, res in cases:
+        ct = ColumnTransformer([("trans", Trans(), selection)], remainder="drop")
+        assert_array_equal(ct.fit_transform(X_df), res)
+        assert_array_equal(ct.fit(X_df).transform(X_df), res)
+
+        # callable that returns any of the allowed specifiers
+        ct = ColumnTransformer(
+            [("trans", Trans(), lambda X: selection)], remainder="drop"
+        )
+        assert_array_equal(ct.fit_transform(X_df), res)
+        assert_array_equal(ct.fit(X_df).transform(X_df), res)
+
+    ct = ColumnTransformer(
+        [("trans1", Trans(), ["first"]), ("trans2", Trans(), ["second"])]
+    )
+    assert_array_equal(ct.fit_transform(X_df), X_res_both)
+    assert_array_equal(ct.fit(X_df).transform(X_df), X_res_both)
+    assert len(ct.transformers_) == 2
+    assert ct.transformers_[-1][0] != "remainder"
+
+    ct = ColumnTransformer([("trans1", Trans(), [0]), ("trans2", Trans(), [1])])
+    assert_array_equal(ct.fit_transform(X_df), X_res_both)
+    assert_array_equal(ct.fit(X_df).transform(X_df), X_res_both)
+    assert len(ct.transformers_) == 2
+    assert ct.transformers_[-1][0] != "remainder"
+
+    # test with transformer_weights
+    transformer_weights = {"trans1": 0.1, "trans2": 10}
+    both = ColumnTransformer(
+        [("trans1", Trans(), ["first"]), ("trans2", Trans(), ["second"])],
+        transformer_weights=transformer_weights,
+    )
+    res = np.vstack(
+        [
+            transformer_weights["trans1"] * X_df["first"],
+            transformer_weights["trans2"] * X_df["second"],
+        ]
+    ).T
+    assert_array_equal(both.fit_transform(X_df), res)
+    assert_array_equal(both.fit(X_df).transform(X_df), res)
+    assert len(both.transformers_) == 2
+    assert both.transformers_[-1][0] != "remainder"
+
+    # test multiple columns
+    both = ColumnTransformer(
+        [("trans", Trans(), ["first", "second"])], transformer_weights={"trans": 0.1}
+    )
+    assert_array_equal(both.fit_transform(X_df), 0.1 * X_res_both)
+    assert_array_equal(both.fit(X_df).transform(X_df), 0.1 * X_res_both)
+    assert len(both.transformers_) == 1
+    assert both.transformers_[-1][0] != "remainder"
+
+    both = ColumnTransformer(
+        [("trans", Trans(), [0, 1])], transformer_weights={"trans": 0.1}
+    )
+    assert_array_equal(both.fit_transform(X_df), 0.1 * X_res_both)
+    assert_array_equal(both.fit(X_df).transform(X_df), 0.1 * X_res_both)
+    assert len(both.transformers_) == 1
+    assert both.transformers_[-1][0] != "remainder"
+
+    # ensure pandas object is passed through
+
+    class TransAssert(BaseEstimator):
+        def __init__(self, expected_type_transform):
+            self.expected_type_transform = expected_type_transform
+
+        def fit(self, X, y=None):
+            return self
+
+        def transform(self, X, y=None):
+            assert isinstance(X, self.expected_type_transform)
+            if isinstance(X, dataframe_lib.Series):
+                X = X.to_frame()
+            return X
+
+    ct = ColumnTransformer(
+        [
+            (
+                "trans",
+                TransAssert(expected_type_transform=dataframe_lib.DataFrame),
+                ["first", "second"],
+            )
+        ]
+    )
+    ct.fit_transform(X_df)
+
+    if constructor_name == "dataframe":
+        # DataFrame protocol does not have 1d columns, so we only test on Pandas
+        # dataframes.
+        ct = ColumnTransformer(
+            [
+                (
+                    "trans",
+                    TransAssert(expected_type_transform=dataframe_lib.Series),
+                    "first",
+                )
+            ],
+            remainder="drop",
+        )
+        ct.fit_transform(X_df)
+
+        # Only test on pandas because the dataframe protocol requires string column
+        # names
+        # integer column spec + integer column names -> still use positional
+        X_df2 = X_df.copy()
+        X_df2.columns = [1, 0]
+        ct = ColumnTransformer([("trans", Trans(), 0)], remainder="drop")
+        assert_array_equal(ct.fit_transform(X_df2), X_res_first)
+        assert_array_equal(ct.fit(X_df2).transform(X_df2), X_res_first)
+
+        assert len(ct.transformers_) == 2
+        assert ct.transformers_[-1][0] == "remainder"
+        assert ct.transformers_[-1][1] == "drop"
+        assert_array_equal(ct.transformers_[-1][2], [1])
+
+
+@pytest.mark.parametrize("pandas", [True, False], ids=["pandas", "numpy"])
+@pytest.mark.parametrize(
+    "column_selection",
+    [[], np.array([False, False]), [False, False]],
+    ids=["list", "bool", "bool_int"],
+)
+@pytest.mark.parametrize("callable_column", [False, True])
+def test_column_transformer_empty_columns(pandas, column_selection, callable_column):
+    # test case that ensures that the column transformer also works when
+    # a given transformer doesn't have any columns to work on
+    X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
+    X_res_both = X_array
+
+    if pandas:
+        pd = pytest.importorskip("pandas")
+        X = pd.DataFrame(X_array, columns=["first", "second"])
+    else:
+        X = X_array
+
+    if callable_column:
+        column = lambda X: column_selection  # noqa
+    else:
+        column = column_selection
+
+    ct = ColumnTransformer(
+        [("trans1", Trans(), [0, 1]), ("trans2", TransRaise(), column)]
+    )
+    assert_array_equal(ct.fit_transform(X), X_res_both)
+    assert_array_equal(ct.fit(X).transform(X), X_res_both)
+    assert len(ct.transformers_) == 2
+    assert isinstance(ct.transformers_[1][1], TransRaise)
+
+    ct = ColumnTransformer(
+        [("trans1", TransRaise(), column), ("trans2", Trans(), [0, 1])]
+    )
+    assert_array_equal(ct.fit_transform(X), X_res_both)
+    assert_array_equal(ct.fit(X).transform(X), X_res_both)
+    assert len(ct.transformers_) == 2
+    assert isinstance(ct.transformers_[0][1], TransRaise)
+
+    ct = ColumnTransformer([("trans", TransRaise(), column)], remainder="passthrough")
+    assert_array_equal(ct.fit_transform(X), X_res_both)
+    assert_array_equal(ct.fit(X).transform(X), X_res_both)
+    assert len(ct.transformers_) == 2  # including remainder
+    assert isinstance(ct.transformers_[0][1], TransRaise)
+
+    fixture = np.array([[], [], []])
+    ct = ColumnTransformer([("trans", TransRaise(), column)], remainder="drop")
+    assert_array_equal(ct.fit_transform(X), fixture)
+    assert_array_equal(ct.fit(X).transform(X), fixture)
+    assert len(ct.transformers_) == 2  # including remainder
+    assert isinstance(ct.transformers_[0][1], TransRaise)
+
+
+def test_column_transformer_output_indices():
+    # Checks for the output_indices_ attribute
+    X_array = np.arange(6).reshape(3, 2)
+
+    ct = ColumnTransformer([("trans1", Trans(), [0]), ("trans2", Trans(), [1])])
+    X_trans = ct.fit_transform(X_array)
+    assert ct.output_indices_ == {
+        "trans1": slice(0, 1),
+        "trans2": slice(1, 2),
+        "remainder": slice(0, 0),
+    }
+    assert_array_equal(X_trans[:, [0]], X_trans[:, ct.output_indices_["trans1"]])
+    assert_array_equal(X_trans[:, [1]], X_trans[:, ct.output_indices_["trans2"]])
+
+    # test with transformer_weights and multiple columns
+    ct = ColumnTransformer(
+        [("trans", Trans(), [0, 1])], transformer_weights={"trans": 0.1}
+    )
+    X_trans = ct.fit_transform(X_array)
+    assert ct.output_indices_ == {"trans": slice(0, 2), "remainder": slice(0, 0)}
+    assert_array_equal(X_trans[:, [0, 1]], X_trans[:, ct.output_indices_["trans"]])
+    assert_array_equal(X_trans[:, []], X_trans[:, ct.output_indices_["remainder"]])
+
+    # test case that ensures that the attribute also works when
+    # a given transformer doesn't have any columns to work on
+    ct = ColumnTransformer([("trans1", Trans(), [0, 1]), ("trans2", TransRaise(), [])])
+    X_trans = ct.fit_transform(X_array)
+    assert ct.output_indices_ == {
+        "trans1": slice(0, 2),
+        "trans2": slice(0, 0),
+        "remainder": slice(0, 0),
+    }
+    assert_array_equal(X_trans[:, [0, 1]], X_trans[:, ct.output_indices_["trans1"]])
+    assert_array_equal(X_trans[:, []], X_trans[:, ct.output_indices_["trans2"]])
+    assert_array_equal(X_trans[:, []], X_trans[:, ct.output_indices_["remainder"]])
+
+    ct = ColumnTransformer([("trans", TransRaise(), [])], remainder="passthrough")
+    X_trans = ct.fit_transform(X_array)
+    assert ct.output_indices_ == {"trans": slice(0, 0), "remainder": slice(0, 2)}
+    assert_array_equal(X_trans[:, []], X_trans[:, ct.output_indices_["trans"]])
+    assert_array_equal(X_trans[:, [0, 1]], X_trans[:, ct.output_indices_["remainder"]])
+
+
+def test_column_transformer_output_indices_df():
+    # Checks for the output_indices_ attribute with data frames
+    pd = pytest.importorskip("pandas")
+
+    X_df = pd.DataFrame(np.arange(6).reshape(3, 2), columns=["first", "second"])
+
+    ct = ColumnTransformer(
+        [("trans1", Trans(), ["first"]), ("trans2", Trans(), ["second"])]
+    )
+    X_trans = ct.fit_transform(X_df)
+    assert ct.output_indices_ == {
+        "trans1": slice(0, 1),
+        "trans2": slice(1, 2),
+        "remainder": slice(0, 0),
+    }
+    assert_array_equal(X_trans[:, [0]], X_trans[:, ct.output_indices_["trans1"]])
+    assert_array_equal(X_trans[:, [1]], X_trans[:, ct.output_indices_["trans2"]])
+    assert_array_equal(X_trans[:, []], X_trans[:, ct.output_indices_["remainder"]])
+
+    ct = ColumnTransformer([("trans1", Trans(), [0]), ("trans2", Trans(), [1])])
+    X_trans = ct.fit_transform(X_df)
+    assert ct.output_indices_ == {
+        "trans1": slice(0, 1),
+        "trans2": slice(1, 2),
+        "remainder": slice(0, 0),
+    }
+    assert_array_equal(X_trans[:, [0]], X_trans[:, ct.output_indices_["trans1"]])
+    assert_array_equal(X_trans[:, [1]], X_trans[:, ct.output_indices_["trans2"]])
+    assert_array_equal(X_trans[:, []], X_trans[:, ct.output_indices_["remainder"]])
+
+
+@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
+def test_column_transformer_sparse_array(csr_container):
+    X_sparse = csr_container(sparse.eye(3, 2))
+
+    # no distinction between 1D and 2D
+    X_res_first = X_sparse[:, [0]]
+    X_res_both = X_sparse
+
+    for col in [(0,), [0], slice(0, 1)]:
+        for remainder, res in [("drop", X_res_first), ("passthrough", X_res_both)]:
+            ct = ColumnTransformer(
+                [("trans", Trans(), col)], remainder=remainder, sparse_threshold=0.8
+            )
+            assert sparse.issparse(ct.fit_transform(X_sparse))
+            assert_allclose_dense_sparse(ct.fit_transform(X_sparse), res)
+            assert_allclose_dense_sparse(ct.fit(X_sparse).transform(X_sparse), res)
+
+    for col in [[0, 1], slice(0, 2)]:
+        ct = ColumnTransformer([("trans", Trans(), col)], sparse_threshold=0.8)
+        assert sparse.issparse(ct.fit_transform(X_sparse))
+        assert_allclose_dense_sparse(ct.fit_transform(X_sparse), X_res_both)
+        assert_allclose_dense_sparse(ct.fit(X_sparse).transform(X_sparse), X_res_both)
+
+
+def test_column_transformer_list():
+    X_list = [[1, float("nan"), "a"], [0, 0, "b"]]
+    expected_result = np.array(
+        [
+            [1, float("nan"), 1, 0],
+            [-1, 0, 0, 1],
+        ]
+    )
+
+    ct = ColumnTransformer(
+        [
+            ("numerical", StandardScaler(), [0, 1]),
+            ("categorical", OneHotEncoder(), [2]),
+        ]
+    )
+
+    assert_array_equal(ct.fit_transform(X_list), expected_result)
+    assert_array_equal(ct.fit(X_list).transform(X_list), expected_result)
+
+
+@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
+def test_column_transformer_sparse_stacking(csr_container):
+    X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
+    col_trans = ColumnTransformer(
+        [("trans1", Trans(), [0]), ("trans2", SparseMatrixTrans(csr_container), 1)],
+        sparse_threshold=0.8,
+    )
+    col_trans.fit(X_array)
+    X_trans = col_trans.transform(X_array)
+    assert sparse.issparse(X_trans)
+    assert X_trans.shape == (X_trans.shape[0], X_trans.shape[0] + 1)
+    assert_array_equal(X_trans.toarray()[:, 1:], np.eye(X_trans.shape[0]))
+    assert len(col_trans.transformers_) == 2
+    assert col_trans.transformers_[-1][0] != "remainder"
+
+    col_trans = ColumnTransformer(
+        [("trans1", Trans(), [0]), ("trans2", SparseMatrixTrans(csr_container), 1)],
+        sparse_threshold=0.1,
+    )
+    col_trans.fit(X_array)
+    X_trans = col_trans.transform(X_array)
+    assert not sparse.issparse(X_trans)
+    assert X_trans.shape == (X_trans.shape[0], X_trans.shape[0] + 1)
+    assert_array_equal(X_trans[:, 1:], np.eye(X_trans.shape[0]))
+
+
+def test_column_transformer_mixed_cols_sparse():
+    df = np.array([["a", 1, True], ["b", 2, False]], dtype="O")
+
+    ct = make_column_transformer(
+        (OneHotEncoder(), [0]), ("passthrough", [1, 2]), sparse_threshold=1.0
+    )
+
+    # this shouldn't fail, since boolean can be coerced into a numeric
+    # See: https://github.com/scikit-learn/scikit-learn/issues/11912
+    X_trans = ct.fit_transform(df)
+    assert X_trans.getformat() == "csr"
+    assert_array_equal(X_trans.toarray(), np.array([[1, 0, 1, 1], [0, 1, 2, 0]]))
+
+    ct = make_column_transformer(
+        (OneHotEncoder(), [0]), ("passthrough", [0]), sparse_threshold=1.0
+    )
+    with pytest.raises(ValueError, match="For a sparse output, all columns should"):
+        # this fails since strings `a` and `b` cannot be
+        # coerced into a numeric.
+        ct.fit_transform(df)
+
+
+def test_column_transformer_sparse_threshold():
+    X_array = np.array([["a", "b"], ["A", "B"]], dtype=object).T
+    # above data has sparsity of 4 / 8 = 0.5
+
+    # apply threshold even if all sparse
+    col_trans = ColumnTransformer(
+        [("trans1", OneHotEncoder(), [0]), ("trans2", OneHotEncoder(), [1])],
+        sparse_threshold=0.2,
+    )
+    res = col_trans.fit_transform(X_array)
+    assert not sparse.issparse(res)
+    assert not col_trans.sparse_output_
+
+    # mixed -> sparsity of (4 + 2) / 8 = 0.75
+    for thres in [0.75001, 1]:
+        col_trans = ColumnTransformer(
+            [
+                ("trans1", OneHotEncoder(sparse_output=True), [0]),
+                ("trans2", OneHotEncoder(sparse_output=False), [1]),
+            ],
+            sparse_threshold=thres,
+        )
+        res = col_trans.fit_transform(X_array)
+        assert sparse.issparse(res)
+        assert col_trans.sparse_output_
+
+    for thres in [0.75, 0]:
+        col_trans = ColumnTransformer(
+            [
+                ("trans1", OneHotEncoder(sparse_output=True), [0]),
+                ("trans2", OneHotEncoder(sparse_output=False), [1]),
+            ],
+            sparse_threshold=thres,
+        )
+        res = col_trans.fit_transform(X_array)
+        assert not sparse.issparse(res)
+        assert not col_trans.sparse_output_
+
+    # if nothing is sparse -> no sparse
+    for thres in [0.33, 0, 1]:
+        col_trans = ColumnTransformer(
+            [
+                ("trans1", OneHotEncoder(sparse_output=False), [0]),
+                ("trans2", OneHotEncoder(sparse_output=False), [1]),
+            ],
+            sparse_threshold=thres,
+        )
+        res = col_trans.fit_transform(X_array)
+        assert not sparse.issparse(res)
+        assert not col_trans.sparse_output_
+
+
+def test_column_transformer_error_msg_1D():
+    X_array = np.array([[0.0, 1.0, 2.0], [2.0, 4.0, 6.0]]).T
+
+    col_trans = ColumnTransformer([("trans", StandardScaler(), 0)])
+    msg = "1D data passed to a transformer"
+    with pytest.raises(ValueError, match=msg):
+        col_trans.fit(X_array)
+
+    with pytest.raises(ValueError, match=msg):
+        col_trans.fit_transform(X_array)
+
+    col_trans = ColumnTransformer([("trans", TransRaise(), 0)])
+    for func in [col_trans.fit, col_trans.fit_transform]:
+        with pytest.raises(ValueError, match="specific message"):
+            func(X_array)
+
+
+def test_2D_transformer_output():
+    X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
+
+    # if one transformer is dropped, test that name is still correct
+    ct = ColumnTransformer([("trans1", "drop", 0), ("trans2", TransNo2D(), 1)])
+
+    msg = "the 'trans2' transformer should be 2D"
+    with pytest.raises(ValueError, match=msg):
+        ct.fit_transform(X_array)
+    # because fit is also doing transform, this raises already on fit
+    with pytest.raises(ValueError, match=msg):
+        ct.fit(X_array)
+
+
+def test_2D_transformer_output_pandas():
+    pd = pytest.importorskip("pandas")
+
+    X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
+    X_df = pd.DataFrame(X_array, columns=["col1", "col2"])
+
+    # if one transformer is dropped, test that name is still correct
+    ct = ColumnTransformer([("trans1", TransNo2D(), "col1")])
+    msg = "the 'trans1' transformer should be 2D"
+    with pytest.raises(ValueError, match=msg):
+        ct.fit_transform(X_df)
+    # because fit is also doing transform, this raises already on fit
+    with pytest.raises(ValueError, match=msg):
+        ct.fit(X_df)
+
+
+@pytest.mark.parametrize("remainder", ["drop", "passthrough"])
+def test_column_transformer_invalid_columns(remainder):
+    X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
+
+    # general invalid
+    for col in [1.5, ["string", 1], slice(1, "s"), np.array([1.0])]:
+        ct = ColumnTransformer([("trans", Trans(), col)], remainder=remainder)
+        with pytest.raises(ValueError, match="No valid specification"):
+            ct.fit(X_array)
+
+    # invalid for arrays
+    for col in ["string", ["string", "other"], slice("a", "b")]:
+        ct = ColumnTransformer([("trans", Trans(), col)], remainder=remainder)
+        with pytest.raises(ValueError, match="Specifying the columns"):
+            ct.fit(X_array)
+
+    # transformed n_features does not match fitted n_features
+    col = [0, 1]
+    ct = ColumnTransformer([("trans", Trans(), col)], remainder=remainder)
+    ct.fit(X_array)
+    X_array_more = np.array([[0, 1, 2], [2, 4, 6], [3, 6, 9]]).T
+    msg = "X has 3 features, but ColumnTransformer is expecting 2 features as input."
+    with pytest.raises(ValueError, match=msg):
+        ct.transform(X_array_more)
+    X_array_fewer = np.array(
+        [
+            [0, 1, 2],
+        ]
+    ).T
+    err_msg = (
+        "X has 1 features, but ColumnTransformer is expecting 2 features as input."
+    )
+    with pytest.raises(ValueError, match=err_msg):
+        ct.transform(X_array_fewer)
+
+
+def test_column_transformer_invalid_transformer():
+    class NoTrans(BaseEstimator):
+        def fit(self, X, y=None):
+            return self
+
+        def predict(self, X):
+            return X
+
+    X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
+    ct = ColumnTransformer([("trans", NoTrans(), [0])])
+    msg = "All estimators should implement fit and transform"
+    with pytest.raises(TypeError, match=msg):
+        ct.fit(X_array)
+
+
+def test_make_column_transformer():
+    scaler = StandardScaler()
+    norm = Normalizer()
+    ct = make_column_transformer((scaler, "first"), (norm, ["second"]))
+    names, transformers, columns = zip(*ct.transformers)
+    assert names == ("standardscaler", "normalizer")
+    assert transformers == (scaler, norm)
+    assert columns == ("first", ["second"])
+
+
+def test_make_column_transformer_pandas():
+    pd = pytest.importorskip("pandas")
+    X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
+    X_df = pd.DataFrame(X_array, columns=["first", "second"])
+    norm = Normalizer()
+    ct1 = ColumnTransformer([("norm", Normalizer(), X_df.columns)])
+    ct2 = make_column_transformer((norm, X_df.columns))
+    assert_almost_equal(ct1.fit_transform(X_df), ct2.fit_transform(X_df))
+
+
+def test_make_column_transformer_kwargs():
+    scaler = StandardScaler()
+    norm = Normalizer()
+    ct = make_column_transformer(
+        (scaler, "first"),
+        (norm, ["second"]),
+        n_jobs=3,
+        remainder="drop",
+        sparse_threshold=0.5,
+    )
+    assert (
+        ct.transformers
+        == make_column_transformer((scaler, "first"), (norm, ["second"])).transformers
+    )
+    assert ct.n_jobs == 3
+    assert ct.remainder == "drop"
+    assert ct.sparse_threshold == 0.5
+    # invalid keyword parameters should raise an error message
+    msg = re.escape(
+        "make_column_transformer() got an unexpected "
+        "keyword argument 'transformer_weights'"
+    )
+    with pytest.raises(TypeError, match=msg):
+        make_column_transformer(
+            (scaler, "first"),
+            (norm, ["second"]),
+            transformer_weights={"pca": 10, "Transf": 1},
+        )
+
+
+def test_make_column_transformer_remainder_transformer():
+    scaler = StandardScaler()
+    norm = Normalizer()
+    remainder = StandardScaler()
+    ct = make_column_transformer(
+        (scaler, "first"), (norm, ["second"]), remainder=remainder
+    )
+    assert ct.remainder == remainder
+
+
+def test_column_transformer_get_set_params():
+    ct = ColumnTransformer(
+        [("trans1", StandardScaler(), [0]), ("trans2", StandardScaler(), [1])]
+    )
+
+    exp = {
+        "n_jobs": None,
+        "remainder": "drop",
+        "sparse_threshold": 0.3,
+        "trans1": ct.transformers[0][1],
+        "trans1__copy": True,
+        "trans1__with_mean": True,
+        "trans1__with_std": True,
+        "trans2": ct.transformers[1][1],
+        "trans2__copy": True,
+        "trans2__with_mean": True,
+        "trans2__with_std": True,
+        "transformers": ct.transformers,
+        "transformer_weights": None,
+        "verbose_feature_names_out": True,
+        "verbose": False,
+    }
+
+    assert ct.get_params() == exp
+
+    ct.set_params(trans1__with_mean=False)
+    assert not ct.get_params()["trans1__with_mean"]
+
+    ct.set_params(trans1="passthrough")
+    exp = {
+        "n_jobs": None,
+        "remainder": "drop",
+        "sparse_threshold": 0.3,
+        "trans1": "passthrough",
+        "trans2": ct.transformers[1][1],
+        "trans2__copy": True,
+        "trans2__with_mean": True,
+        "trans2__with_std": True,
+        "transformers": ct.transformers,
+        "transformer_weights": None,
+        "verbose_feature_names_out": True,
+        "verbose": False,
+    }
+
+    assert ct.get_params() == exp
+
+
+def test_column_transformer_named_estimators():
+    X_array = np.array([[0.0, 1.0, 2.0], [2.0, 4.0, 6.0]]).T
+    ct = ColumnTransformer(
+        [
+            ("trans1", StandardScaler(), [0]),
+            ("trans2", StandardScaler(with_std=False), [1]),
+        ]
+    )
+    assert not hasattr(ct, "transformers_")
+    ct.fit(X_array)
+    assert hasattr(ct, "transformers_")
+    assert isinstance(ct.named_transformers_["trans1"], StandardScaler)
+    assert isinstance(ct.named_transformers_.trans1, StandardScaler)
+    assert isinstance(ct.named_transformers_["trans2"], StandardScaler)
+    assert isinstance(ct.named_transformers_.trans2, StandardScaler)
+    assert not ct.named_transformers_.trans2.with_std
+    # check that the transformers are fitted
833 |
+
assert ct.named_transformers_.trans1.mean_ == 1.0
|
834 |
+
|
835 |
+
|
836 |
+
def test_column_transformer_cloning():
|
837 |
+
X_array = np.array([[0.0, 1.0, 2.0], [2.0, 4.0, 6.0]]).T
|
838 |
+
|
839 |
+
ct = ColumnTransformer([("trans", StandardScaler(), [0])])
|
840 |
+
ct.fit(X_array)
|
841 |
+
assert not hasattr(ct.transformers[0][1], "mean_")
|
842 |
+
assert hasattr(ct.transformers_[0][1], "mean_")
|
843 |
+
|
844 |
+
ct = ColumnTransformer([("trans", StandardScaler(), [0])])
|
845 |
+
ct.fit_transform(X_array)
|
846 |
+
assert not hasattr(ct.transformers[0][1], "mean_")
|
847 |
+
assert hasattr(ct.transformers_[0][1], "mean_")
|
848 |
+
|
849 |
+
|
850 |
+
def test_column_transformer_get_feature_names():
|
851 |
+
X_array = np.array([[0.0, 1.0, 2.0], [2.0, 4.0, 6.0]]).T
|
852 |
+
ct = ColumnTransformer([("trans", Trans(), [0, 1])])
|
853 |
+
# raise correct error when not fitted
|
854 |
+
with pytest.raises(NotFittedError):
|
855 |
+
ct.get_feature_names_out()
|
856 |
+
# raise correct error when no feature names are available
|
857 |
+
ct.fit(X_array)
|
858 |
+
msg = re.escape(
|
859 |
+
"Transformer trans (type Trans) does not provide get_feature_names_out"
|
860 |
+
)
|
861 |
+
with pytest.raises(AttributeError, match=msg):
|
862 |
+
ct.get_feature_names_out()
|
863 |
+
|
864 |
+
|
865 |
+
def test_column_transformer_special_strings():
|
866 |
+
# one 'drop' -> ignore
|
867 |
+
X_array = np.array([[0.0, 1.0, 2.0], [2.0, 4.0, 6.0]]).T
|
868 |
+
ct = ColumnTransformer([("trans1", Trans(), [0]), ("trans2", "drop", [1])])
|
869 |
+
exp = np.array([[0.0], [1.0], [2.0]])
|
870 |
+
assert_array_equal(ct.fit_transform(X_array), exp)
|
871 |
+
assert_array_equal(ct.fit(X_array).transform(X_array), exp)
|
872 |
+
assert len(ct.transformers_) == 2
|
873 |
+
assert ct.transformers_[-1][0] != "remainder"
|
874 |
+
|
875 |
+
# all 'drop' -> return shape 0 array
|
876 |
+
ct = ColumnTransformer([("trans1", "drop", [0]), ("trans2", "drop", [1])])
|
877 |
+
assert_array_equal(ct.fit(X_array).transform(X_array).shape, (3, 0))
|
878 |
+
assert_array_equal(ct.fit_transform(X_array).shape, (3, 0))
|
879 |
+
assert len(ct.transformers_) == 2
|
880 |
+
assert ct.transformers_[-1][0] != "remainder"
|
881 |
+
|
882 |
+
# 'passthrough'
|
883 |
+
X_array = np.array([[0.0, 1.0, 2.0], [2.0, 4.0, 6.0]]).T
|
884 |
+
ct = ColumnTransformer([("trans1", Trans(), [0]), ("trans2", "passthrough", [1])])
|
885 |
+
exp = X_array
|
886 |
+
assert_array_equal(ct.fit_transform(X_array), exp)
|
887 |
+
assert_array_equal(ct.fit(X_array).transform(X_array), exp)
|
888 |
+
assert len(ct.transformers_) == 2
|
889 |
+
assert ct.transformers_[-1][0] != "remainder"
|
890 |
+
|
891 |
+
|
892 |
+
def test_column_transformer_remainder():
|
893 |
+
X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
|
894 |
+
|
895 |
+
X_res_first = np.array([0, 1, 2]).reshape(-1, 1)
|
896 |
+
X_res_second = np.array([2, 4, 6]).reshape(-1, 1)
|
897 |
+
X_res_both = X_array
|
898 |
+
|
899 |
+
# default drop
|
900 |
+
ct = ColumnTransformer([("trans1", Trans(), [0])])
|
901 |
+
assert_array_equal(ct.fit_transform(X_array), X_res_first)
|
902 |
+
assert_array_equal(ct.fit(X_array).transform(X_array), X_res_first)
|
903 |
+
assert len(ct.transformers_) == 2
|
904 |
+
assert ct.transformers_[-1][0] == "remainder"
|
905 |
+
assert ct.transformers_[-1][1] == "drop"
|
906 |
+
assert_array_equal(ct.transformers_[-1][2], [1])
|
907 |
+
|
908 |
+
# specify passthrough
|
909 |
+
ct = ColumnTransformer([("trans", Trans(), [0])], remainder="passthrough")
|
910 |
+
assert_array_equal(ct.fit_transform(X_array), X_res_both)
|
911 |
+
assert_array_equal(ct.fit(X_array).transform(X_array), X_res_both)
|
912 |
+
assert len(ct.transformers_) == 2
|
913 |
+
assert ct.transformers_[-1][0] == "remainder"
|
914 |
+
assert isinstance(ct.transformers_[-1][1], FunctionTransformer)
|
915 |
+
assert_array_equal(ct.transformers_[-1][2], [1])
|
916 |
+
|
917 |
+
# column order is not preserved (passed through added to end)
|
918 |
+
ct = ColumnTransformer([("trans1", Trans(), [1])], remainder="passthrough")
|
919 |
+
assert_array_equal(ct.fit_transform(X_array), X_res_both[:, ::-1])
|
920 |
+
assert_array_equal(ct.fit(X_array).transform(X_array), X_res_both[:, ::-1])
|
921 |
+
assert len(ct.transformers_) == 2
|
922 |
+
assert ct.transformers_[-1][0] == "remainder"
|
923 |
+
assert isinstance(ct.transformers_[-1][1], FunctionTransformer)
|
924 |
+
assert_array_equal(ct.transformers_[-1][2], [0])
|
925 |
+
|
926 |
+
# passthrough when all actual transformers are skipped
|
927 |
+
ct = ColumnTransformer([("trans1", "drop", [0])], remainder="passthrough")
|
928 |
+
assert_array_equal(ct.fit_transform(X_array), X_res_second)
|
929 |
+
assert_array_equal(ct.fit(X_array).transform(X_array), X_res_second)
|
930 |
+
assert len(ct.transformers_) == 2
|
931 |
+
assert ct.transformers_[-1][0] == "remainder"
|
932 |
+
assert isinstance(ct.transformers_[-1][1], FunctionTransformer)
|
933 |
+
assert_array_equal(ct.transformers_[-1][2], [1])
|
934 |
+
|
935 |
+
# check default for make_column_transformer
|
936 |
+
ct = make_column_transformer((Trans(), [0]))
|
937 |
+
assert ct.remainder == "drop"
|
938 |
+
|
939 |
+
|
940 |
+
@pytest.mark.parametrize(
|
941 |
+
"key", [[0], np.array([0]), slice(0, 1), np.array([True, False])]
|
942 |
+
)
|
943 |
+
def test_column_transformer_remainder_numpy(key):
|
944 |
+
# test different ways that columns are specified with passthrough
|
945 |
+
X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
|
946 |
+
X_res_both = X_array
|
947 |
+
|
948 |
+
ct = ColumnTransformer([("trans1", Trans(), key)], remainder="passthrough")
|
949 |
+
assert_array_equal(ct.fit_transform(X_array), X_res_both)
|
950 |
+
assert_array_equal(ct.fit(X_array).transform(X_array), X_res_both)
|
951 |
+
assert len(ct.transformers_) == 2
|
952 |
+
assert ct.transformers_[-1][0] == "remainder"
|
953 |
+
assert isinstance(ct.transformers_[-1][1], FunctionTransformer)
|
954 |
+
assert_array_equal(ct.transformers_[-1][2], [1])
|
955 |
+
|
956 |
+
|
957 |
+
@pytest.mark.parametrize(
|
958 |
+
"key",
|
959 |
+
[
|
960 |
+
[0],
|
961 |
+
slice(0, 1),
|
962 |
+
np.array([True, False]),
|
963 |
+
["first"],
|
964 |
+
"pd-index",
|
965 |
+
np.array(["first"]),
|
966 |
+
np.array(["first"], dtype=object),
|
967 |
+
slice(None, "first"),
|
968 |
+
slice("first", "first"),
|
969 |
+
],
|
970 |
+
)
|
971 |
+
def test_column_transformer_remainder_pandas(key):
|
972 |
+
# test different ways that columns are specified with passthrough
|
973 |
+
pd = pytest.importorskip("pandas")
|
974 |
+
if isinstance(key, str) and key == "pd-index":
|
975 |
+
key = pd.Index(["first"])
|
976 |
+
|
977 |
+
X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
|
978 |
+
X_df = pd.DataFrame(X_array, columns=["first", "second"])
|
979 |
+
X_res_both = X_array
|
980 |
+
|
981 |
+
ct = ColumnTransformer([("trans1", Trans(), key)], remainder="passthrough")
|
982 |
+
assert_array_equal(ct.fit_transform(X_df), X_res_both)
|
983 |
+
assert_array_equal(ct.fit(X_df).transform(X_df), X_res_both)
|
984 |
+
assert len(ct.transformers_) == 2
|
985 |
+
assert ct.transformers_[-1][0] == "remainder"
|
986 |
+
assert isinstance(ct.transformers_[-1][1], FunctionTransformer)
|
987 |
+
assert_array_equal(ct.transformers_[-1][2], [1])
|
988 |
+
|
989 |
+
|
990 |
+
@pytest.mark.parametrize(
|
991 |
+
"key", [[0], np.array([0]), slice(0, 1), np.array([True, False, False])]
|
992 |
+
)
|
993 |
+
def test_column_transformer_remainder_transformer(key):
|
994 |
+
X_array = np.array([[0, 1, 2], [2, 4, 6], [8, 6, 4]]).T
|
995 |
+
X_res_both = X_array.copy()
|
996 |
+
|
997 |
+
# second and third columns are doubled when remainder = DoubleTrans
|
998 |
+
X_res_both[:, 1:3] *= 2
|
999 |
+
|
1000 |
+
ct = ColumnTransformer([("trans1", Trans(), key)], remainder=DoubleTrans())
|
1001 |
+
|
1002 |
+
assert_array_equal(ct.fit_transform(X_array), X_res_both)
|
1003 |
+
assert_array_equal(ct.fit(X_array).transform(X_array), X_res_both)
|
1004 |
+
assert len(ct.transformers_) == 2
|
1005 |
+
assert ct.transformers_[-1][0] == "remainder"
|
1006 |
+
assert isinstance(ct.transformers_[-1][1], DoubleTrans)
|
1007 |
+
assert_array_equal(ct.transformers_[-1][2], [1, 2])
|
1008 |
+
|
1009 |
+
|
1010 |
+
def test_column_transformer_no_remaining_remainder_transformer():
|
1011 |
+
X_array = np.array([[0, 1, 2], [2, 4, 6], [8, 6, 4]]).T
|
1012 |
+
|
1013 |
+
ct = ColumnTransformer([("trans1", Trans(), [0, 1, 2])], remainder=DoubleTrans())
|
1014 |
+
|
1015 |
+
assert_array_equal(ct.fit_transform(X_array), X_array)
|
1016 |
+
assert_array_equal(ct.fit(X_array).transform(X_array), X_array)
|
1017 |
+
assert len(ct.transformers_) == 1
|
1018 |
+
assert ct.transformers_[-1][0] != "remainder"
|
1019 |
+
|
1020 |
+
|
1021 |
+
def test_column_transformer_drops_all_remainder_transformer():
|
1022 |
+
X_array = np.array([[0, 1, 2], [2, 4, 6], [8, 6, 4]]).T
|
1023 |
+
|
1024 |
+
# columns are doubled when remainder = DoubleTrans
|
1025 |
+
X_res_both = 2 * X_array.copy()[:, 1:3]
|
1026 |
+
|
1027 |
+
ct = ColumnTransformer([("trans1", "drop", [0])], remainder=DoubleTrans())
|
1028 |
+
|
1029 |
+
assert_array_equal(ct.fit_transform(X_array), X_res_both)
|
1030 |
+
assert_array_equal(ct.fit(X_array).transform(X_array), X_res_both)
|
1031 |
+
assert len(ct.transformers_) == 2
|
1032 |
+
assert ct.transformers_[-1][0] == "remainder"
|
1033 |
+
assert isinstance(ct.transformers_[-1][1], DoubleTrans)
|
1034 |
+
assert_array_equal(ct.transformers_[-1][2], [1, 2])
|
1035 |
+
|
1036 |
+
|
1037 |
+
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
|
1038 |
+
def test_column_transformer_sparse_remainder_transformer(csr_container):
|
1039 |
+
X_array = np.array([[0, 1, 2], [2, 4, 6], [8, 6, 4]]).T
|
1040 |
+
|
1041 |
+
ct = ColumnTransformer(
|
1042 |
+
[("trans1", Trans(), [0])],
|
1043 |
+
remainder=SparseMatrixTrans(csr_container),
|
1044 |
+
sparse_threshold=0.8,
|
1045 |
+
)
|
1046 |
+
|
1047 |
+
X_trans = ct.fit_transform(X_array)
|
1048 |
+
assert sparse.issparse(X_trans)
|
1049 |
+
# SparseMatrixTrans creates 3 features for each column. There is
|
1050 |
+
# one column in ``transformers``, thus:
|
1051 |
+
assert X_trans.shape == (3, 3 + 1)
|
1052 |
+
|
1053 |
+
exp_array = np.hstack((X_array[:, 0].reshape(-1, 1), np.eye(3)))
|
1054 |
+
assert_array_equal(X_trans.toarray(), exp_array)
|
1055 |
+
assert len(ct.transformers_) == 2
|
1056 |
+
assert ct.transformers_[-1][0] == "remainder"
|
1057 |
+
assert isinstance(ct.transformers_[-1][1], SparseMatrixTrans)
|
1058 |
+
assert_array_equal(ct.transformers_[-1][2], [1, 2])
|
1059 |
+
|
1060 |
+
|
1061 |
+
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
|
1062 |
+
def test_column_transformer_drop_all_sparse_remainder_transformer(csr_container):
|
1063 |
+
X_array = np.array([[0, 1, 2], [2, 4, 6], [8, 6, 4]]).T
|
1064 |
+
ct = ColumnTransformer(
|
1065 |
+
[("trans1", "drop", [0])],
|
1066 |
+
remainder=SparseMatrixTrans(csr_container),
|
1067 |
+
sparse_threshold=0.8,
|
1068 |
+
)
|
1069 |
+
|
1070 |
+
X_trans = ct.fit_transform(X_array)
|
1071 |
+
assert sparse.issparse(X_trans)
|
1072 |
+
|
1073 |
+
# SparseMatrixTrans creates 3 features for each column, thus:
|
1074 |
+
assert X_trans.shape == (3, 3)
|
1075 |
+
assert_array_equal(X_trans.toarray(), np.eye(3))
|
1076 |
+
assert len(ct.transformers_) == 2
|
1077 |
+
assert ct.transformers_[-1][0] == "remainder"
|
1078 |
+
assert isinstance(ct.transformers_[-1][1], SparseMatrixTrans)
|
1079 |
+
assert_array_equal(ct.transformers_[-1][2], [1, 2])
|
1080 |
+
|
1081 |
+
|
1082 |
+
def test_column_transformer_get_set_params_with_remainder():
|
1083 |
+
ct = ColumnTransformer(
|
1084 |
+
[("trans1", StandardScaler(), [0])], remainder=StandardScaler()
|
1085 |
+
)
|
1086 |
+
|
1087 |
+
exp = {
|
1088 |
+
"n_jobs": None,
|
1089 |
+
"remainder": ct.remainder,
|
1090 |
+
"remainder__copy": True,
|
1091 |
+
"remainder__with_mean": True,
|
1092 |
+
"remainder__with_std": True,
|
1093 |
+
"sparse_threshold": 0.3,
|
1094 |
+
"trans1": ct.transformers[0][1],
|
1095 |
+
"trans1__copy": True,
|
1096 |
+
"trans1__with_mean": True,
|
1097 |
+
"trans1__with_std": True,
|
1098 |
+
"transformers": ct.transformers,
|
1099 |
+
"transformer_weights": None,
|
1100 |
+
"verbose_feature_names_out": True,
|
1101 |
+
"verbose": False,
|
1102 |
+
}
|
1103 |
+
|
1104 |
+
assert ct.get_params() == exp
|
1105 |
+
|
1106 |
+
ct.set_params(remainder__with_std=False)
|
1107 |
+
assert not ct.get_params()["remainder__with_std"]
|
1108 |
+
|
1109 |
+
ct.set_params(trans1="passthrough")
|
1110 |
+
exp = {
|
1111 |
+
"n_jobs": None,
|
1112 |
+
"remainder": ct.remainder,
|
1113 |
+
"remainder__copy": True,
|
1114 |
+
"remainder__with_mean": True,
|
1115 |
+
"remainder__with_std": False,
|
1116 |
+
"sparse_threshold": 0.3,
|
1117 |
+
"trans1": "passthrough",
|
1118 |
+
"transformers": ct.transformers,
|
1119 |
+
"transformer_weights": None,
|
1120 |
+
"verbose_feature_names_out": True,
|
1121 |
+
"verbose": False,
|
1122 |
+
}
|
1123 |
+
assert ct.get_params() == exp
|
1124 |
+
|
1125 |
+
|
1126 |
+
def test_column_transformer_no_estimators():
|
1127 |
+
X_array = np.array([[0, 1, 2], [2, 4, 6], [8, 6, 4]]).astype("float").T
|
1128 |
+
ct = ColumnTransformer([], remainder=StandardScaler())
|
1129 |
+
|
1130 |
+
params = ct.get_params()
|
1131 |
+
assert params["remainder__with_mean"]
|
1132 |
+
|
1133 |
+
X_trans = ct.fit_transform(X_array)
|
1134 |
+
assert X_trans.shape == X_array.shape
|
1135 |
+
assert len(ct.transformers_) == 1
|
1136 |
+
assert ct.transformers_[-1][0] == "remainder"
|
1137 |
+
assert ct.transformers_[-1][2] == [0, 1, 2]
|
1138 |
+
|
1139 |
+
|
1140 |
+
@pytest.mark.parametrize(
|
1141 |
+
["est", "pattern"],
|
1142 |
+
[
|
1143 |
+
(
|
1144 |
+
ColumnTransformer(
|
1145 |
+
[("trans1", Trans(), [0]), ("trans2", Trans(), [1])],
|
1146 |
+
remainder=DoubleTrans(),
|
1147 |
+
),
|
1148 |
+
(
|
1149 |
+
r"\[ColumnTransformer\].*\(1 of 3\) Processing trans1.* total=.*\n"
|
1150 |
+
r"\[ColumnTransformer\].*\(2 of 3\) Processing trans2.* total=.*\n"
|
1151 |
+
r"\[ColumnTransformer\].*\(3 of 3\) Processing remainder.* total=.*\n$"
|
1152 |
+
),
|
1153 |
+
),
|
1154 |
+
(
|
1155 |
+
ColumnTransformer(
|
1156 |
+
[("trans1", Trans(), [0]), ("trans2", Trans(), [1])],
|
1157 |
+
remainder="passthrough",
|
1158 |
+
),
|
1159 |
+
(
|
1160 |
+
r"\[ColumnTransformer\].*\(1 of 3\) Processing trans1.* total=.*\n"
|
1161 |
+
r"\[ColumnTransformer\].*\(2 of 3\) Processing trans2.* total=.*\n"
|
1162 |
+
r"\[ColumnTransformer\].*\(3 of 3\) Processing remainder.* total=.*\n$"
|
1163 |
+
),
|
1164 |
+
),
|
1165 |
+
(
|
1166 |
+
ColumnTransformer(
|
1167 |
+
[("trans1", Trans(), [0]), ("trans2", "drop", [1])],
|
1168 |
+
remainder="passthrough",
|
1169 |
+
),
|
1170 |
+
(
|
1171 |
+
r"\[ColumnTransformer\].*\(1 of 2\) Processing trans1.* total=.*\n"
|
1172 |
+
r"\[ColumnTransformer\].*\(2 of 2\) Processing remainder.* total=.*\n$"
|
1173 |
+
),
|
1174 |
+
),
|
1175 |
+
(
|
1176 |
+
ColumnTransformer(
|
1177 |
+
[("trans1", Trans(), [0]), ("trans2", "passthrough", [1])],
|
1178 |
+
remainder="passthrough",
|
1179 |
+
),
|
1180 |
+
(
|
1181 |
+
r"\[ColumnTransformer\].*\(1 of 3\) Processing trans1.* total=.*\n"
|
1182 |
+
r"\[ColumnTransformer\].*\(2 of 3\) Processing trans2.* total=.*\n"
|
1183 |
+
r"\[ColumnTransformer\].*\(3 of 3\) Processing remainder.* total=.*\n$"
|
1184 |
+
),
|
1185 |
+
),
|
1186 |
+
(
|
1187 |
+
ColumnTransformer([("trans1", Trans(), [0])], remainder="passthrough"),
|
1188 |
+
(
|
1189 |
+
r"\[ColumnTransformer\].*\(1 of 2\) Processing trans1.* total=.*\n"
|
1190 |
+
r"\[ColumnTransformer\].*\(2 of 2\) Processing remainder.* total=.*\n$"
|
1191 |
+
),
|
1192 |
+
),
|
1193 |
+
(
|
1194 |
+
ColumnTransformer(
|
1195 |
+
[("trans1", Trans(), [0]), ("trans2", Trans(), [1])], remainder="drop"
|
1196 |
+
),
|
1197 |
+
(
|
1198 |
+
r"\[ColumnTransformer\].*\(1 of 2\) Processing trans1.* total=.*\n"
|
1199 |
+
r"\[ColumnTransformer\].*\(2 of 2\) Processing trans2.* total=.*\n$"
|
1200 |
+
),
|
1201 |
+
),
|
1202 |
+
(
|
1203 |
+
ColumnTransformer([("trans1", Trans(), [0])], remainder="drop"),
|
1204 |
+
r"\[ColumnTransformer\].*\(1 of 1\) Processing trans1.* total=.*\n$",
|
1205 |
+
),
|
1206 |
+
],
|
1207 |
+
)
|
1208 |
+
@pytest.mark.parametrize("method", ["fit", "fit_transform"])
|
1209 |
+
def test_column_transformer_verbose(est, pattern, method, capsys):
|
1210 |
+
X_array = np.array([[0, 1, 2], [2, 4, 6], [8, 6, 4]]).T
|
1211 |
+
|
1212 |
+
func = getattr(est, method)
|
1213 |
+
est.set_params(verbose=False)
|
1214 |
+
func(X_array)
|
1215 |
+
assert not capsys.readouterr().out, "Got output for verbose=False"
|
1216 |
+
|
1217 |
+
est.set_params(verbose=True)
|
1218 |
+
func(X_array)
|
1219 |
+
assert re.match(pattern, capsys.readouterr()[0])
|
1220 |
+
|
1221 |
+
|
1222 |
+
def test_column_transformer_no_estimators_set_params():
|
1223 |
+
ct = ColumnTransformer([]).set_params(n_jobs=2)
|
1224 |
+
assert ct.n_jobs == 2
|
1225 |
+
|
1226 |
+
|
1227 |
+
def test_column_transformer_callable_specifier():
|
1228 |
+
# assert that function gets the full array
|
1229 |
+
X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
|
1230 |
+
X_res_first = np.array([[0, 1, 2]]).T
|
1231 |
+
|
1232 |
+
def func(X):
|
1233 |
+
assert_array_equal(X, X_array)
|
1234 |
+
return [0]
|
1235 |
+
|
1236 |
+
ct = ColumnTransformer([("trans", Trans(), func)], remainder="drop")
|
1237 |
+
assert_array_equal(ct.fit_transform(X_array), X_res_first)
|
1238 |
+
assert_array_equal(ct.fit(X_array).transform(X_array), X_res_first)
|
1239 |
+
assert callable(ct.transformers[0][2])
|
1240 |
+
assert ct.transformers_[0][2] == [0]
|
1241 |
+
|
1242 |
+
|
1243 |
+
def test_column_transformer_callable_specifier_dataframe():
|
1244 |
+
# assert that function gets the full dataframe
|
1245 |
+
pd = pytest.importorskip("pandas")
|
1246 |
+
X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
|
1247 |
+
X_res_first = np.array([[0, 1, 2]]).T
|
1248 |
+
|
1249 |
+
X_df = pd.DataFrame(X_array, columns=["first", "second"])
|
1250 |
+
|
1251 |
+
def func(X):
|
1252 |
+
assert_array_equal(X.columns, X_df.columns)
|
1253 |
+
assert_array_equal(X.values, X_df.values)
|
1254 |
+
return ["first"]
|
1255 |
+
|
1256 |
+
ct = ColumnTransformer([("trans", Trans(), func)], remainder="drop")
|
1257 |
+
assert_array_equal(ct.fit_transform(X_df), X_res_first)
|
1258 |
+
assert_array_equal(ct.fit(X_df).transform(X_df), X_res_first)
|
1259 |
+
assert callable(ct.transformers[0][2])
|
1260 |
+
assert ct.transformers_[0][2] == ["first"]
|
1261 |
+
|
1262 |
+
|
1263 |
+
def test_column_transformer_negative_column_indexes():
|
1264 |
+
X = np.random.randn(2, 2)
|
1265 |
+
X_categories = np.array([[1], [2]])
|
1266 |
+
X = np.concatenate([X, X_categories], axis=1)
|
1267 |
+
|
1268 |
+
ohe = OneHotEncoder()
|
1269 |
+
|
1270 |
+
tf_1 = ColumnTransformer([("ohe", ohe, [-1])], remainder="passthrough")
|
1271 |
+
tf_2 = ColumnTransformer([("ohe", ohe, [2])], remainder="passthrough")
|
1272 |
+
assert_array_equal(tf_1.fit_transform(X), tf_2.fit_transform(X))
|
1273 |
+
|
1274 |
+
|
1275 |
+
@pytest.mark.parametrize("array_type", [np.asarray, *CSR_CONTAINERS])
|
1276 |
+
def test_column_transformer_mask_indexing(array_type):
|
1277 |
+
# Regression test for #14510
|
1278 |
+
# Boolean array-like does not behave as boolean array with sparse matrices.
|
1279 |
+
X = np.transpose([[1, 2, 3], [4, 5, 6], [5, 6, 7], [8, 9, 10]])
|
1280 |
+
X = array_type(X)
|
1281 |
+
column_transformer = ColumnTransformer(
|
1282 |
+
[("identity", FunctionTransformer(), [False, True, False, True])]
|
1283 |
+
)
|
1284 |
+
X_trans = column_transformer.fit_transform(X)
|
1285 |
+
assert X_trans.shape == (3, 2)
|
1286 |
+
|
1287 |
+
|
1288 |
+
def test_n_features_in():
|
1289 |
+
# make sure n_features_in is what is passed as input to the column
|
1290 |
+
# transformer.
|
1291 |
+
|
1292 |
+
X = [[1, 2], [3, 4], [5, 6]]
|
1293 |
+
ct = ColumnTransformer([("a", DoubleTrans(), [0]), ("b", DoubleTrans(), [1])])
|
1294 |
+
assert not hasattr(ct, "n_features_in_")
|
1295 |
+
ct.fit(X)
|
1296 |
+
assert ct.n_features_in_ == 2
|
1297 |
+
|
1298 |
+
|
1299 |
+
@pytest.mark.parametrize(
|
1300 |
+
"cols, pattern, include, exclude",
|
1301 |
+
[
|
1302 |
+
(["col_int", "col_float"], None, np.number, None),
|
1303 |
+
(["col_int", "col_float"], None, None, object),
|
1304 |
+
(["col_int", "col_float"], None, [int, float], None),
|
1305 |
+
(["col_str"], None, [object], None),
|
1306 |
+
(["col_str"], None, object, None),
|
1307 |
+
(["col_float"], None, float, None),
|
1308 |
+
(["col_float"], "at$", [np.number], None),
|
1309 |
+
(["col_int"], None, [int], None),
|
1310 |
+
(["col_int"], "^col_int", [np.number], None),
|
1311 |
+
(["col_float", "col_str"], "float|str", None, None),
|
1312 |
+
(["col_str"], "^col_s", None, [int]),
|
1313 |
+
([], "str$", float, None),
|
1314 |
+
(["col_int", "col_float", "col_str"], None, [np.number, object], None),
|
1315 |
+
],
|
1316 |
+
)
|
1317 |
+
def test_make_column_selector_with_select_dtypes(cols, pattern, include, exclude):
|
1318 |
+
pd = pytest.importorskip("pandas")
|
1319 |
+
|
1320 |
+
X_df = pd.DataFrame(
|
1321 |
+
{
|
1322 |
+
"col_int": np.array([0, 1, 2], dtype=int),
|
1323 |
+
"col_float": np.array([0.0, 1.0, 2.0], dtype=float),
|
1324 |
+
"col_str": ["one", "two", "three"],
|
1325 |
+
},
|
1326 |
+
columns=["col_int", "col_float", "col_str"],
|
1327 |
+
)
|
1328 |
+
|
1329 |
+
selector = make_column_selector(
|
1330 |
+
dtype_include=include, dtype_exclude=exclude, pattern=pattern
|
1331 |
+
)
|
1332 |
+
|
1333 |
+
assert_array_equal(selector(X_df), cols)
|
1334 |
+
|
1335 |
+
|
def test_column_transformer_with_make_column_selector():
    # Functional test for column transformer + column selector
    pd = pytest.importorskip("pandas")
    X_df = pd.DataFrame(
        {
            "col_int": np.array([0, 1, 2], dtype=int),
            "col_float": np.array([0.0, 1.0, 2.0], dtype=float),
            "col_cat": ["one", "two", "one"],
            "col_str": ["low", "middle", "high"],
        },
        columns=["col_int", "col_float", "col_cat", "col_str"],
    )
    X_df["col_str"] = X_df["col_str"].astype("category")

    cat_selector = make_column_selector(dtype_include=["category", object])
    num_selector = make_column_selector(dtype_include=np.number)

    ohe = OneHotEncoder()
    scaler = StandardScaler()

    ct_selector = make_column_transformer((ohe, cat_selector), (scaler, num_selector))
    ct_direct = make_column_transformer(
        (ohe, ["col_cat", "col_str"]), (scaler, ["col_float", "col_int"])
    )

    X_selector = ct_selector.fit_transform(X_df)
    X_direct = ct_direct.fit_transform(X_df)

    assert_allclose(X_selector, X_direct)


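# [Editor's illustrative sketch -- not part of the original test file.]
# `make_column_transformer` is the functional counterpart used above: it takes
# (transformer, columns) pairs and derives the step names from the estimator
# class names. `_sketch_make_column_transformer` is an invented name.
def _sketch_make_column_transformer():
    ct = make_column_transformer((StandardScaler(), [0, 1]))
    assert ct.transformers[0][0] == "standardscaler"  # auto-generated name

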
def test_make_column_selector_error():
    selector = make_column_selector(dtype_include=np.number)
    X = np.array([[0.1, 0.2]])
    msg = "make_column_selector can only be applied to pandas dataframes"
    with pytest.raises(ValueError, match=msg):
        selector(X)


def test_make_column_selector_pickle():
    pd = pytest.importorskip("pandas")

    X_df = pd.DataFrame(
        {
            "col_int": np.array([0, 1, 2], dtype=int),
            "col_float": np.array([0.0, 1.0, 2.0], dtype=float),
            "col_str": ["one", "two", "three"],
        },
        columns=["col_int", "col_float", "col_str"],
    )

    selector = make_column_selector(dtype_include=[object])
    selector_picked = pickle.loads(pickle.dumps(selector))

    assert_array_equal(selector(X_df), selector_picked(X_df))


@pytest.mark.parametrize(
    "empty_col",
    [[], np.array([], dtype=int), lambda x: []],
    ids=["list", "array", "callable"],
)
def test_feature_names_empty_columns(empty_col):
    pd = pytest.importorskip("pandas")

    df = pd.DataFrame({"col1": ["a", "a", "b"], "col2": ["z", "z", "z"]})

    ct = ColumnTransformer(
        transformers=[
            ("ohe", OneHotEncoder(), ["col1", "col2"]),
            ("empty_features", OneHotEncoder(), empty_col),
        ],
    )

    ct.fit(df)
    assert_array_equal(
        ct.get_feature_names_out(), ["ohe__col1_a", "ohe__col1_b", "ohe__col2_z"]
    )


@pytest.mark.parametrize(
    "selector",
    [
        [1],
        lambda x: [1],
        ["col2"],
        lambda x: ["col2"],
        [False, True],
        lambda x: [False, True],
    ],
)
def test_feature_names_out_pandas(selector):
    """Checks name when selecting only the second column"""
    pd = pytest.importorskip("pandas")
    df = pd.DataFrame({"col1": ["a", "a", "b"], "col2": ["z", "z", "z"]})
    ct = ColumnTransformer([("ohe", OneHotEncoder(), selector)])
    ct.fit(df)

    assert_array_equal(ct.get_feature_names_out(), ["ohe__col2_z"])


@pytest.mark.parametrize(
    "selector", [[1], lambda x: [1], [False, True], lambda x: [False, True]]
)
def test_feature_names_out_non_pandas(selector):
    """Checks name when selecting the second column with numpy array"""
    X = [["a", "z"], ["a", "z"], ["b", "z"]]
    ct = ColumnTransformer([("ohe", OneHotEncoder(), selector)])
    ct.fit(X)

    assert_array_equal(ct.get_feature_names_out(), ["ohe__x1_z"])


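# [Editor's illustrative sketch -- not part of the original test file.] With
# array (non-dataframe) input, columns have no labels, so generated names of the
# form x0, x1, ... feed into the prefixed output names asserted above.
# `_sketch_generated_feature_names` is an invented name.
def _sketch_generated_feature_names():
    X = [["a", "z"], ["b", "z"]]
    ct = ColumnTransformer([("ohe", OneHotEncoder(), [0])]).fit(X)
    assert list(ct.get_feature_names_out()) == ["ohe__x0_a", "ohe__x0_b"]

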
@pytest.mark.parametrize("remainder", ["passthrough", StandardScaler()])
def test_sk_visual_block_remainder(remainder):
    # remainder='passthrough' or an estimator will be shown in repr_html
    ohe = OneHotEncoder()
    ct = ColumnTransformer(
        transformers=[("ohe", ohe, ["col1", "col2"])], remainder=remainder
    )
    visual_block = ct._sk_visual_block_()
    assert visual_block.names == ("ohe", "remainder")
    assert visual_block.name_details == (["col1", "col2"], "")
    assert visual_block.estimators == (ohe, remainder)


def test_sk_visual_block_remainder_drop():
    # remainder='drop' is not shown in repr_html
    ohe = OneHotEncoder()
    ct = ColumnTransformer(transformers=[("ohe", ohe, ["col1", "col2"])])
    visual_block = ct._sk_visual_block_()
    assert visual_block.names == ("ohe",)
    assert visual_block.name_details == (["col1", "col2"],)
    assert visual_block.estimators == (ohe,)


@pytest.mark.parametrize("remainder", ["passthrough", StandardScaler()])
def test_sk_visual_block_remainder_fitted_pandas(remainder):
    # Remainder shows the columns after fitting
    pd = pytest.importorskip("pandas")
    ohe = OneHotEncoder()
    ct = ColumnTransformer(
        transformers=[("ohe", ohe, ["col1", "col2"])], remainder=remainder
    )
    df = pd.DataFrame(
        {
            "col1": ["a", "b", "c"],
            "col2": ["z", "z", "z"],
            "col3": [1, 2, 3],
            "col4": [3, 4, 5],
        }
    )
    ct.fit(df)
    visual_block = ct._sk_visual_block_()
    assert visual_block.names == ("ohe", "remainder")
    assert visual_block.name_details == (["col1", "col2"], ["col3", "col4"])
    assert visual_block.estimators == (ohe, remainder)


@pytest.mark.parametrize("remainder", ["passthrough", StandardScaler()])
def test_sk_visual_block_remainder_fitted_numpy(remainder):
    # Remainder shows the indices after fitting
    X = np.array([[1, 2, 3], [4, 5, 6]], dtype=float)
    scaler = StandardScaler()
    ct = ColumnTransformer(
        transformers=[("scale", scaler, [0, 2])], remainder=remainder
    )
    ct.fit(X)
    visual_block = ct._sk_visual_block_()
    assert visual_block.names == ("scale", "remainder")
    assert visual_block.name_details == ([0, 2], [1])
    assert visual_block.estimators == (scaler, remainder)


@pytest.mark.parametrize("explicit_colname", ["first", "second", 0, 1])
@pytest.mark.parametrize("remainder", [Trans(), "passthrough", "drop"])
def test_column_transformer_reordered_column_names_remainder(
    explicit_colname, remainder
):
    """Test the interaction between remainder and column transformer"""
    pd = pytest.importorskip("pandas")

    X_fit_array = np.array([[0, 1, 2], [2, 4, 6]]).T
    X_fit_df = pd.DataFrame(X_fit_array, columns=["first", "second"])

    X_trans_array = np.array([[2, 4, 6], [0, 1, 2]]).T
    X_trans_df = pd.DataFrame(X_trans_array, columns=["second", "first"])

    tf = ColumnTransformer([("bycol", Trans(), explicit_colname)], remainder=remainder)

    tf.fit(X_fit_df)
    X_fit_trans = tf.transform(X_fit_df)

    # Changing the order still works
    X_trans = tf.transform(X_trans_df)
    assert_allclose(X_trans, X_fit_trans)

    # extra columns are ignored
    X_extended_df = X_fit_df.copy()
    X_extended_df["third"] = [3, 6, 9]
    X_trans = tf.transform(X_extended_df)
    assert_allclose(X_trans, X_fit_trans)

    if isinstance(explicit_colname, str):
        # Raise error if columns are specified by names but input only allows
        # to specify by position, e.g. numpy array instead of a pandas df.
        X_array = X_fit_array.copy()
        err_msg = "Specifying the columns"
        with pytest.raises(ValueError, match=err_msg):
            tf.transform(X_array)


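# [Editor's illustrative sketch -- not part of the original test file.] When
# columns are specified by name, a fitted ColumnTransformer selects them by
# label at transform time, so reordering the dataframe columns leaves the
# output unchanged, as the test above checks. `_sketch_reordered_columns` is an
# invented name.
def _sketch_reordered_columns():
    import pandas as pd

    df = pd.DataFrame({"first": [0, 1], "second": [2, 4]})
    ct = ColumnTransformer([("keep", "passthrough", ["first"])], remainder="drop")
    ct.fit(df)
    assert_array_equal(ct.transform(df), ct.transform(df[["second", "first"]]))

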
def test_feature_name_validation_missing_columns_drop_passthough():
    """Test the interaction between {'drop', 'passthrough'} and
    missing column names."""
    pd = pytest.importorskip("pandas")

    X = np.ones(shape=(3, 4))
    df = pd.DataFrame(X, columns=["a", "b", "c", "d"])

    df_dropped = df.drop("c", axis=1)

    # with remainder='passthrough', all columns seen during `fit` must be
    # present
    tf = ColumnTransformer([("bycol", Trans(), [1])], remainder="passthrough")
    tf.fit(df)
    msg = r"columns are missing: {'c'}"
    with pytest.raises(ValueError, match=msg):
        tf.transform(df_dropped)

    # with remainder='drop', it is allowed to have column 'c' missing
    tf = ColumnTransformer([("bycol", Trans(), [1])], remainder="drop")
    tf.fit(df)

    df_dropped_trans = tf.transform(df_dropped)
    df_fit_trans = tf.transform(df)
    assert_allclose(df_dropped_trans, df_fit_trans)

    # bycol drops 'c', thus it is allowed for 'c' to be missing
    tf = ColumnTransformer([("bycol", "drop", ["c"])], remainder="passthrough")
    tf.fit(df)
    df_dropped_trans = tf.transform(df_dropped)
    df_fit_trans = tf.transform(df)
    assert_allclose(df_dropped_trans, df_fit_trans)


def test_feature_names_in_():
    """Feature names are stored in column transformer.

    Column transformer deliberately does not check for column name consistency.
    It only checks that the non-dropped names seen in `fit` are seen
    in `transform`. This behavior is already tested in
    `test_feature_name_validation_missing_columns_drop_passthough`"""

    pd = pytest.importorskip("pandas")

    feature_names = ["a", "c", "d"]
    df = pd.DataFrame([[1, 2, 3]], columns=feature_names)
    ct = ColumnTransformer([("bycol", Trans(), ["a", "d"])], remainder="passthrough")

    ct.fit(df)
    assert_array_equal(ct.feature_names_in_, feature_names)
    assert isinstance(ct.feature_names_in_, np.ndarray)
    assert ct.feature_names_in_.dtype == object


class TransWithNames(Trans):
    def __init__(self, feature_names_out=None):
        self.feature_names_out = feature_names_out

    def get_feature_names_out(self, input_features=None):
        if self.feature_names_out is not None:
            return np.asarray(self.feature_names_out, dtype=object)
        return input_features


@pytest.mark.parametrize(
    "transformers, remainder, expected_names",
    [
        (
            [
                ("bycol1", TransWithNames(), ["d", "c"]),
                ("bycol2", "passthrough", ["d"]),
            ],
            "passthrough",
            ["bycol1__d", "bycol1__c", "bycol2__d", "remainder__a", "remainder__b"],
        ),
        (
            [
                ("bycol1", TransWithNames(), ["d", "c"]),
                ("bycol2", "passthrough", ["d"]),
            ],
            "drop",
            ["bycol1__d", "bycol1__c", "bycol2__d"],
        ),
        (
            [
                ("bycol1", TransWithNames(), ["b"]),
                ("bycol2", "drop", ["d"]),
            ],
            "passthrough",
            ["bycol1__b", "remainder__a", "remainder__c"],
        ),
        (
            [
                ("bycol1", TransWithNames(["pca1", "pca2"]), ["a", "b", "d"]),
            ],
            "passthrough",
            ["bycol1__pca1", "bycol1__pca2", "remainder__c"],
        ),
        (
            [
                ("bycol1", TransWithNames(["a", "b"]), ["d"]),
                ("bycol2", "passthrough", ["b"]),
            ],
            "drop",
            ["bycol1__a", "bycol1__b", "bycol2__b"],
        ),
        (
            [
                ("bycol1", TransWithNames([f"pca{i}" for i in range(2)]), ["b"]),
                ("bycol2", TransWithNames([f"pca{i}" for i in range(2)]), ["b"]),
            ],
            "passthrough",
            [
                "bycol1__pca0",
                "bycol1__pca1",
                "bycol2__pca0",
                "bycol2__pca1",
                "remainder__a",
                "remainder__c",
                "remainder__d",
            ],
        ),
        (
            [
                ("bycol1", "drop", ["d"]),
            ],
            "drop",
            [],
        ),
        (
            [
                ("bycol1", TransWithNames(), slice(1, 3)),
            ],
            "drop",
            ["bycol1__b", "bycol1__c"],
        ),
        (
            [
                ("bycol1", TransWithNames(), ["b"]),
                ("bycol2", "drop", slice(3, 4)),
            ],
            "passthrough",
            ["bycol1__b", "remainder__a", "remainder__c"],
        ),
        (
            [
                ("bycol1", TransWithNames(), ["d", "c"]),
                ("bycol2", "passthrough", slice(3, 4)),
            ],
            "passthrough",
            ["bycol1__d", "bycol1__c", "bycol2__d", "remainder__a", "remainder__b"],
        ),
        (
            [
                ("bycol1", TransWithNames(), slice("b", "c")),
            ],
            "drop",
            ["bycol1__b", "bycol1__c"],
        ),
        (
            [
                ("bycol1", TransWithNames(), ["b"]),
                ("bycol2", "drop", slice("c", "d")),
            ],
            "passthrough",
            ["bycol1__b", "remainder__a"],
        ),
        (
            [
                ("bycol1", TransWithNames(), ["d", "c"]),
                ("bycol2", "passthrough", slice("c", "d")),
            ],
            "passthrough",
            [
                "bycol1__d",
                "bycol1__c",
                "bycol2__c",
                "bycol2__d",
                "remainder__a",
                "remainder__b",
            ],
        ),
    ],
)
def test_verbose_feature_names_out_true(transformers, remainder, expected_names):
    """Check feature_names_out for verbose_feature_names_out=True (default)"""
    pd = pytest.importorskip("pandas")
    df = pd.DataFrame([[1, 2, 3, 4]], columns=["a", "b", "c", "d"])
    ct = ColumnTransformer(
        transformers,
        remainder=remainder,
    )
    ct.fit(df)

    names = ct.get_feature_names_out()
    assert isinstance(names, np.ndarray)
    assert names.dtype == object
    assert_array_equal(names, expected_names)


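# [Editor's illustrative sketch -- not part of the original test file.] With the
# default verbose_feature_names_out=True, each output name is prefixed with the
# name of the transformer that produced it ("<name>__<feature>"), which keeps
# overlapping selections like those parametrized above unambiguous.
# `_sketch_verbose_prefixes` is an invented name.
def _sketch_verbose_prefixes():
    import pandas as pd

    df = pd.DataFrame({"a": [1], "b": [2]})
    ct = ColumnTransformer(
        [("t1", "passthrough", ["a"]), ("t2", "passthrough", ["a", "b"])]
    ).fit(df)
    assert list(ct.get_feature_names_out()) == ["t1__a", "t2__a", "t2__b"]

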
@pytest.mark.parametrize(
    "transformers, remainder, expected_names",
    [
        (
            [
                ("bycol1", TransWithNames(), ["d", "c"]),
                ("bycol2", "passthrough", ["a"]),
            ],
            "passthrough",
            ["d", "c", "a", "b"],
        ),
        (
            [
                ("bycol1", TransWithNames(["a"]), ["d", "c"]),
                ("bycol2", "passthrough", ["d"]),
            ],
            "drop",
            ["a", "d"],
        ),
        (
            [
                ("bycol1", TransWithNames(), ["b"]),
                ("bycol2", "drop", ["d"]),
            ],
            "passthrough",
            ["b", "a", "c"],
        ),
        (
            [
                ("bycol1", TransWithNames(["pca1", "pca2"]), ["a", "b", "d"]),
            ],
            "passthrough",
            ["pca1", "pca2", "c"],
        ),
        (
            [
                ("bycol1", TransWithNames(["a", "c"]), ["d"]),
                ("bycol2", "passthrough", ["d"]),
            ],
            "drop",
            ["a", "c", "d"],
        ),
        (
            [
                ("bycol1", TransWithNames([f"pca{i}" for i in range(2)]), ["b"]),
                ("bycol2", TransWithNames([f"kpca{i}" for i in range(2)]), ["b"]),
            ],
            "passthrough",
            ["pca0", "pca1", "kpca0", "kpca1", "a", "c", "d"],
        ),
        (
            [
                ("bycol1", "drop", ["d"]),
            ],
            "drop",
            [],
        ),
        (
            [
                ("bycol1", TransWithNames(), slice(1, 2)),
                ("bycol2", "drop", ["d"]),
            ],
            "passthrough",
            ["b", "a", "c"],
        ),
        (
            [
                ("bycol1", TransWithNames(), ["b"]),
                ("bycol2", "drop", slice(3, 4)),
            ],
            "passthrough",
            ["b", "a", "c"],
        ),
        (
            [
                ("bycol1", TransWithNames(), ["d", "c"]),
                ("bycol2", "passthrough", slice(0, 2)),
            ],
            "drop",
            ["d", "c", "a", "b"],
        ),
        (
            [
                ("bycol1", TransWithNames(), slice("a", "b")),
                ("bycol2", "drop", ["d"]),
            ],
            "passthrough",
            ["a", "b", "c"],
        ),
        (
            [
                ("bycol1", TransWithNames(), ["b"]),
                ("bycol2", "drop", slice("c", "d")),
            ],
            "passthrough",
            ["b", "a"],
        ),
        (
            [
                ("bycol1", TransWithNames(), ["d", "c"]),
                ("bycol2", "passthrough", slice("a", "b")),
            ],
            "drop",
            ["d", "c", "a", "b"],
        ),
        (
            [
                ("bycol1", TransWithNames(), ["d", "c"]),
                ("bycol2", "passthrough", slice("b", "b")),
            ],
            "drop",
            ["d", "c", "b"],
        ),
    ],
)
def test_verbose_feature_names_out_false(transformers, remainder, expected_names):
    """Check feature_names_out for verbose_feature_names_out=False"""
    pd = pytest.importorskip("pandas")
    df = pd.DataFrame([[1, 2, 3, 4]], columns=["a", "b", "c", "d"])
    ct = ColumnTransformer(
        transformers,
        remainder=remainder,
        verbose_feature_names_out=False,
    )
    ct.fit(df)

    names = ct.get_feature_names_out()
    assert isinstance(names, np.ndarray)
    assert names.dtype == object
    assert_array_equal(names, expected_names)


@pytest.mark.parametrize(
    "transformers, remainder, colliding_columns",
    [
        (
            [
                ("bycol1", TransWithNames(), ["b"]),
                ("bycol2", "passthrough", ["b"]),
            ],
            "drop",
            "['b']",
        ),
        (
            [
                ("bycol1", TransWithNames(["c", "d"]), ["c"]),
                ("bycol2", "passthrough", ["c"]),
            ],
            "drop",
            "['c']",
        ),
        (
            [
                ("bycol1", TransWithNames(["a"]), ["b"]),
                ("bycol2", "passthrough", ["b"]),
            ],
            "passthrough",
            "['a']",
        ),
        (
            [
                ("bycol1", TransWithNames(["a"]), ["b"]),
                ("bycol2", "drop", ["b"]),
            ],
            "passthrough",
            "['a']",
        ),
        (
            [
                ("bycol1", TransWithNames(["c", "b"]), ["b"]),
                ("bycol2", "passthrough", ["c", "b"]),
            ],
            "drop",
            "['b', 'c']",
        ),
        (
            [
                ("bycol1", TransWithNames(["a"]), ["b"]),
                ("bycol2", "passthrough", ["a"]),
                ("bycol3", TransWithNames(["a"]), ["b"]),
            ],
            "passthrough",
            "['a']",
        ),
        (
            [
                ("bycol1", TransWithNames(["a", "b"]), ["b"]),
                ("bycol2", "passthrough", ["a"]),
                ("bycol3", TransWithNames(["b"]), ["c"]),
            ],
            "passthrough",
            "['a', 'b']",
        ),
        (
            [
                ("bycol1", TransWithNames([f"pca{i}" for i in range(6)]), ["b"]),
                ("bycol2", TransWithNames([f"pca{i}" for i in range(6)]), ["b"]),
            ],
            "passthrough",
            "['pca0', 'pca1', 'pca2', 'pca3', 'pca4', ...]",
        ),
        (
            [
                ("bycol1", TransWithNames(["a", "b"]), slice(1, 2)),
                ("bycol2", "passthrough", ["a"]),
                ("bycol3", TransWithNames(["b"]), ["c"]),
            ],
            "passthrough",
            "['a', 'b']",
        ),
        (
            [
                ("bycol1", TransWithNames(["a", "b"]), ["b"]),
                ("bycol2", "passthrough", slice(0, 1)),
                ("bycol3", TransWithNames(["b"]), ["c"]),
            ],
            "passthrough",
            "['a', 'b']",
        ),
        (
            [
                ("bycol1", TransWithNames(["a", "b"]), slice("b", "c")),
                ("bycol2", "passthrough", ["a"]),
                ("bycol3", TransWithNames(["b"]), ["c"]),
            ],
            "passthrough",
            "['a', 'b']",
        ),
        (
            [
                ("bycol1", TransWithNames(["a", "b"]), ["b"]),
                ("bycol2", "passthrough", slice("a", "a")),
                ("bycol3", TransWithNames(["b"]), ["c"]),
            ],
            "passthrough",
            "['a', 'b']",
        ),
    ],
)
def test_verbose_feature_names_out_false_errors(
    transformers, remainder, colliding_columns
):
    """Check feature_names_out for verbose_feature_names_out=False"""

    pd = pytest.importorskip("pandas")
    df = pd.DataFrame([[1, 2, 3, 4]], columns=["a", "b", "c", "d"])
    ct = ColumnTransformer(
        transformers,
        remainder=remainder,
        verbose_feature_names_out=False,
    )
    ct.fit(df)

    msg = re.escape(
        f"Output feature names: {colliding_columns} are not unique. Please set "
        "verbose_feature_names_out=True to add prefixes to feature names"
    )
    with pytest.raises(ValueError, match=msg):
        ct.get_feature_names_out()


@pytest.mark.parametrize("verbose_feature_names_out", [True, False])
@pytest.mark.parametrize("remainder", ["drop", "passthrough"])
def test_column_transformer_set_output(verbose_feature_names_out, remainder):
    """Check column transformer behavior with set_output."""
    pd = pytest.importorskip("pandas")
    df = pd.DataFrame([[1, 2, 3, 4]], columns=["a", "b", "c", "d"], index=[10])
    ct = ColumnTransformer(
        [("first", TransWithNames(), ["a", "c"]), ("second", TransWithNames(), ["d"])],
        remainder=remainder,
        verbose_feature_names_out=verbose_feature_names_out,
    )
    X_trans = ct.fit_transform(df)
    assert isinstance(X_trans, np.ndarray)

    ct.set_output(transform="pandas")

    df_test = pd.DataFrame([[1, 2, 3, 4]], columns=df.columns, index=[20])
    X_trans = ct.transform(df_test)
    assert isinstance(X_trans, pd.DataFrame)

    feature_names_out = ct.get_feature_names_out()
    assert_array_equal(X_trans.columns, feature_names_out)
    assert_array_equal(X_trans.index, df_test.index)


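# [Editor's illustrative sketch -- not part of the original test file.]
# `set_output(transform="pandas")` only changes the output container and can be
# toggled before or after fitting; the column labels come from
# `get_feature_names_out`. `_sketch_set_output` is an invented name.
def _sketch_set_output():
    import pandas as pd

    df = pd.DataFrame({"a": [1.0, 2.0], "b": [3.0, 4.0]})
    ct = ColumnTransformer([("scale", StandardScaler(), ["a", "b"])])
    assert isinstance(ct.fit_transform(df), np.ndarray)  # default: numpy output
    ct.set_output(transform="pandas")
    assert isinstance(ct.transform(df), pd.DataFrame)

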
@pytest.mark.parametrize("remainder", ["drop", "passthrough"])
@pytest.mark.parametrize("fit_transform", [True, False])
def test_column_transform_set_output_mixed(remainder, fit_transform):
    """Check ColumnTransformer outputs mixed types correctly."""
    pd = pytest.importorskip("pandas")
    df = pd.DataFrame(
        {
            "pet": pd.Series(["dog", "cat", "snake"], dtype="category"),
            "color": pd.Series(["green", "blue", "red"], dtype="object"),
            "age": [1.4, 2.1, 4.4],
            "height": [20, 40, 10],
            "distance": pd.Series([20, pd.NA, 100], dtype="Int32"),
        }
    )
    ct = ColumnTransformer(
        [
            (
                "color_encode",
                OneHotEncoder(sparse_output=False, dtype="int8"),
                ["color"],
            ),
            ("age", StandardScaler(), ["age"]),
        ],
        remainder=remainder,
        verbose_feature_names_out=False,
    ).set_output(transform="pandas")
    if fit_transform:
        X_trans = ct.fit_transform(df)
    else:
        X_trans = ct.fit(df).transform(df)

    assert isinstance(X_trans, pd.DataFrame)
    assert_array_equal(X_trans.columns, ct.get_feature_names_out())

    expected_dtypes = {
        "color_blue": "int8",
        "color_green": "int8",
        "color_red": "int8",
        "age": "float64",
        "pet": "category",
        "height": "int64",
        "distance": "Int32",
    }
    for col, dtype in X_trans.dtypes.items():
        assert dtype == expected_dtypes[col]


@pytest.mark.parametrize("remainder", ["drop", "passthrough"])
def test_column_transform_set_output_after_fitting(remainder):
    pd = pytest.importorskip("pandas")
    df = pd.DataFrame(
        {
            "pet": pd.Series(["dog", "cat", "snake"], dtype="category"),
            "age": [1.4, 2.1, 4.4],
            "height": [20, 40, 10],
        }
    )
    ct = ColumnTransformer(
        [
            (
                "color_encode",
                OneHotEncoder(sparse_output=False, dtype="int16"),
                ["pet"],
            ),
            ("age", StandardScaler(), ["age"]),
        ],
        remainder=remainder,
        verbose_feature_names_out=False,
    )

    # fit without calling set_output
    X_trans = ct.fit_transform(df)
    assert isinstance(X_trans, np.ndarray)
    assert X_trans.dtype == "float64"

    ct.set_output(transform="pandas")
    X_trans_df = ct.transform(df)
    expected_dtypes = {
        "pet_cat": "int16",
        "pet_dog": "int16",
        "pet_snake": "int16",
        "height": "int64",
        "age": "float64",
    }
    for col, dtype in X_trans_df.dtypes.items():
        assert dtype == expected_dtypes[col]


# PandasOutTransformer that does not define get_feature_names_out and always expects
# the input to be a DataFrame.
class PandasOutTransformer(BaseEstimator):
    def __init__(self, offset=1.0):
        self.offset = offset

    def fit(self, X, y=None):
        pd = pytest.importorskip("pandas")
        assert isinstance(X, pd.DataFrame)
        return self

    def transform(self, X, y=None):
        pd = pytest.importorskip("pandas")
        assert isinstance(X, pd.DataFrame)
        return X - self.offset

    def set_output(self, transform=None):
        # This transformer will always output a DataFrame regardless of the
        # configuration.
        return self


@pytest.mark.parametrize(
    "trans_1, expected_verbose_names, expected_non_verbose_names",
    [
        (
            PandasOutTransformer(offset=2.0),
            ["trans_0__feat1", "trans_1__feat0"],
            ["feat1", "feat0"],
        ),
        (
            "drop",
            ["trans_0__feat1"],
            ["feat1"],
        ),
        (
            "passthrough",
            ["trans_0__feat1", "trans_1__feat0"],
            ["feat1", "feat0"],
        ),
    ],
)
def test_transformers_with_pandas_out_but_not_feature_names_out(
    trans_1, expected_verbose_names, expected_non_verbose_names
):
    """Check that set_config(transform="pandas") is compatible with more transformers.

    Specifically, if a transformer returns a DataFrame but does not define
    `get_feature_names_out`.
    """
    pd = pytest.importorskip("pandas")

    X_df = pd.DataFrame({"feat0": [1.0, 2.0, 3.0], "feat1": [2.0, 3.0, 4.0]})
    ct = ColumnTransformer(
        [
            ("trans_0", PandasOutTransformer(offset=3.0), ["feat1"]),
            ("trans_1", trans_1, ["feat0"]),
        ]
    )
    X_trans_np = ct.fit_transform(X_df)
    assert isinstance(X_trans_np, np.ndarray)

    # `ct` does not have `get_feature_names_out` because `PandasOutTransformer` does
    # not define the method.
    with pytest.raises(AttributeError, match="not provide get_feature_names_out"):
        ct.get_feature_names_out()

    # The feature names are prefixed because verbose_feature_names_out=True is default
    ct.set_output(transform="pandas")
    X_trans_df0 = ct.fit_transform(X_df)
    assert_array_equal(X_trans_df0.columns, expected_verbose_names)

    ct.set_params(verbose_feature_names_out=False)
    X_trans_df1 = ct.fit_transform(X_df)
    assert_array_equal(X_trans_df1.columns, expected_non_verbose_names)


@pytest.mark.parametrize(
    "empty_selection",
    [[], np.array([False, False]), [False, False]],
    ids=["list", "bool", "bool_int"],
)
def test_empty_selection_pandas_output(empty_selection):
    """Check that pandas output works when there is an empty selection.

    Non-regression test for gh-25487
    """
    pd = pytest.importorskip("pandas")

    X = pd.DataFrame([[1.0, 2.2], [3.0, 1.0]], columns=["a", "b"])
    ct = ColumnTransformer(
        [
            ("categorical", "passthrough", empty_selection),
            ("numerical", StandardScaler(), ["a", "b"]),
        ],
        verbose_feature_names_out=True,
    )
    ct.set_output(transform="pandas")
    X_out = ct.fit_transform(X)
    assert_array_equal(X_out.columns, ["numerical__a", "numerical__b"])

    ct.set_params(verbose_feature_names_out=False)
    X_out = ct.fit_transform(X)
    assert_array_equal(X_out.columns, ["a", "b"])


def test_raise_error_if_index_not_aligned():
    """Check column transformer raises error if indices are not aligned.

    Non-regression test for gh-26210.
    """
    pd = pytest.importorskip("pandas")

    X = pd.DataFrame([[1.0, 2.2], [3.0, 1.0]], columns=["a", "b"], index=[8, 3])
    reset_index_transformer = FunctionTransformer(
        lambda x: x.reset_index(drop=True), feature_names_out="one-to-one"
    )

    ct = ColumnTransformer(
        [
            ("num1", "passthrough", ["a"]),
            ("num2", reset_index_transformer, ["b"]),
        ],
    )
    ct.set_output(transform="pandas")
    msg = (
        "Concatenating DataFrames from the transformer's output lead to"
        " an inconsistent number of samples. The output may have Pandas"
        " Indexes that do not match."
    )
    with pytest.raises(ValueError, match=msg):
        ct.fit_transform(X)


def test_remainder_set_output():
    """Check that the output is set for the remainder.

    Non-regression test for #26306.
    """

    pd = pytest.importorskip("pandas")
    df = pd.DataFrame({"a": [True, False, True], "b": [1, 2, 3]})

    ct = make_column_transformer(
        (VarianceThreshold(), make_column_selector(dtype_include=bool)),
        remainder=VarianceThreshold(),
        verbose_feature_names_out=False,
    )
    ct.set_output(transform="pandas")

    out = ct.fit_transform(df)
    pd.testing.assert_frame_equal(out, df)

    ct.set_output(transform="default")
    out = ct.fit_transform(df)
    assert isinstance(out, np.ndarray)


# TODO(1.6): replace the warning by a ValueError exception
def test_transform_pd_na():
    """Check behavior when a transformer's output contains pandas.NA

    It should emit a warning unless the output config is set to 'pandas'.
    """
    pd = pytest.importorskip("pandas")
    if not hasattr(pd, "Float64Dtype"):
        pytest.skip(
            "The issue with pd.NA tested here does not happen in old versions that do"
            " not have the extension dtypes"
        )
    df = pd.DataFrame({"a": [1.5, None]})
    ct = make_column_transformer(("passthrough", ["a"]))
    # No warning with non-extension dtypes and np.nan
    with warnings.catch_warnings():
        warnings.simplefilter("error")
        ct.fit_transform(df)
    df = df.convert_dtypes()
    # Warning with extension dtype and pd.NA
    with pytest.warns(FutureWarning, match=r"set_output\(transform='pandas'\)"):
        ct.fit_transform(df)
    # No warning when output is set to pandas
    with warnings.catch_warnings():
        warnings.simplefilter("error")
        ct.set_output(transform="pandas")
        ct.fit_transform(df)
    ct.set_output(transform="default")
    # No warning when there are no pd.NA
    with warnings.catch_warnings():
        warnings.simplefilter("error")
        ct.fit_transform(df.fillna(-1.0))


def test_dataframe_different_dataframe_libraries():
    """Check fitting and transforming on pandas and polars dataframes."""
    pd = pytest.importorskip("pandas")
    pl = pytest.importorskip("polars")
    X_train_np = np.array([[0, 1], [2, 4], [4, 5]])
    X_test_np = np.array([[1, 2], [1, 3], [2, 3]])

    # Fit on pandas and transform on polars
    X_train_pd = pd.DataFrame(X_train_np, columns=["a", "b"])
    X_test_pl = pl.DataFrame(X_test_np, schema=["a", "b"])

    ct = make_column_transformer((Trans(), [0, 1]))
    ct.fit(X_train_pd)

    out_pl_in = ct.transform(X_test_pl)
    assert_array_equal(out_pl_in, X_test_np)

    # Fit on polars and transform on pandas
    X_train_pl = pl.DataFrame(X_train_np, schema=["a", "b"])
    X_test_pd = pd.DataFrame(X_test_np, columns=["a", "b"])
    ct.fit(X_train_pl)

    out_pd_in = ct.transform(X_test_pd)
    assert_array_equal(out_pd_in, X_test_np)


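# [Editor's illustrative sketch -- not part of the original test file; assumes
# both pandas and polars are installed.] Fitting on one dataframe library and
# transforming on the other works as long as the column names agree, as the
# test above exercises. `_sketch_cross_dataframe` is an invented name.
def _sketch_cross_dataframe():
    import pandas as pd
    import polars as pl

    ct = make_column_transformer(("passthrough", ["a"]))
    ct.fit(pd.DataFrame({"a": [1, 2], "b": [3, 4]}))
    out = ct.transform(pl.DataFrame({"a": [5, 6], "b": [7, 8]}))
    assert_array_equal(np.asarray(out).ravel(), [5, 6])

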
@pytest.mark.parametrize("transform_output", ["default", "pandas"])
def test_column_transformer_remainder_passthrough_naming_consistency(transform_output):
    """Check that when `remainder="passthrough"`, inconsistent naming is handled
    correctly by the underlying `FunctionTransformer`.

    Non-regression test for:
    https://github.com/scikit-learn/scikit-learn/issues/28232
    """
    pd = pytest.importorskip("pandas")
    X = pd.DataFrame(np.random.randn(10, 4))

    preprocessor = ColumnTransformer(
        transformers=[("scaler", StandardScaler(), [0, 1])],
        remainder="passthrough",
    ).set_output(transform=transform_output)
    X_trans = preprocessor.fit_transform(X)
    assert X_trans.shape == X.shape

    expected_column_names = [
        "scaler__x0",
        "scaler__x1",
        "remainder__x2",
        "remainder__x3",
    ]
    if hasattr(X_trans, "columns"):
        assert X_trans.columns.tolist() == expected_column_names
    assert preprocessor.get_feature_names_out().tolist() == expected_column_names


@pytest.mark.parametrize("dataframe_lib", ["pandas", "polars"])
def test_column_transformer_column_renaming(dataframe_lib):
    """Check that we properly rename columns when using `ColumnTransformer` and
    selected columns are redundant between transformers.

    Non-regression test for:
    https://github.com/scikit-learn/scikit-learn/issues/28260
    """
    lib = pytest.importorskip(dataframe_lib)

    df = lib.DataFrame({"x1": [1, 2, 3], "x2": [10, 20, 30], "x3": [100, 200, 300]})

    transformer = ColumnTransformer(
        transformers=[
            ("A", "passthrough", ["x1", "x2", "x3"]),
            ("B", FunctionTransformer(), ["x1", "x2"]),
            ("C", StandardScaler(), ["x1", "x3"]),
            # special case of empty transformer
            ("D", FunctionTransformer(lambda x: x[[]]), ["x1", "x2", "x3"]),
        ],
        verbose_feature_names_out=True,
    ).set_output(transform=dataframe_lib)
    df_trans = transformer.fit_transform(df)
    assert list(df_trans.columns) == [
        "A__x1",
        "A__x2",
        "A__x3",
        "B__x1",
        "B__x2",
        "C__x1",
        "C__x3",
    ]


@pytest.mark.parametrize("dataframe_lib", ["pandas", "polars"])
def test_column_transformer_error_with_duplicated_columns(dataframe_lib):
    """Check that we raise an error when using `ColumnTransformer` and
    the column names are duplicated between transformers."""
    lib = pytest.importorskip(dataframe_lib)

    df = lib.DataFrame({"x1": [1, 2, 3], "x2": [10, 20, 30], "x3": [100, 200, 300]})

    transformer = ColumnTransformer(
        transformers=[
            ("A", "passthrough", ["x1", "x2", "x3"]),
            ("B", FunctionTransformer(), ["x1", "x2"]),
            ("C", StandardScaler(), ["x1", "x3"]),
            # special case of empty transformer
            ("D", FunctionTransformer(lambda x: x[[]]), ["x1", "x2", "x3"]),
        ],
        verbose_feature_names_out=False,
    ).set_output(transform=dataframe_lib)
    err_msg = re.escape(
        "Duplicated feature names found before concatenating the outputs of the "
        "transformers: ['x1', 'x2', 'x3'].\n"
        "Transformer A has conflicting columns names: ['x1', 'x2', 'x3'].\n"
        "Transformer B has conflicting columns names: ['x1', 'x2'].\n"
        "Transformer C has conflicting columns names: ['x1', 'x3'].\n"
    )
    with pytest.raises(ValueError, match=err_msg):
        transformer.fit_transform(df)


# Metadata Routing Tests
# ======================


@pytest.mark.parametrize("method", ["transform", "fit_transform", "fit"])
def test_routing_passed_metadata_not_supported(method):
    """Test that the right error message is raised when metadata is passed while
    not supported when `enable_metadata_routing=False`."""

    X = np.array([[0, 1, 2], [2, 4, 6]]).T
    y = [1, 2, 3]
    trs = ColumnTransformer([("trans", Trans(), [0])]).fit(X, y)

    with pytest.raises(
        ValueError, match="is only supported if enable_metadata_routing=True"
    ):
        getattr(trs, method)([[1]], sample_weight=[1], prop="a")


@pytest.mark.usefixtures("enable_slep006")
@pytest.mark.parametrize("method", ["transform", "fit_transform", "fit"])
def test_metadata_routing_for_column_transformer(method):
    """Test that metadata is routed correctly for column transformer."""
    X = np.array([[0, 1, 2], [2, 4, 6]]).T
    y = [1, 2, 3]
    registry = _Registry()
    sample_weight, metadata = [1], "a"
    trs = ColumnTransformer(
        [
            (
                "trans",
                ConsumingTransformer(registry=registry)
                .set_fit_request(sample_weight=True, metadata=True)
                .set_transform_request(sample_weight=True, metadata=True),
                [0],
            )
        ]
    )

    if method == "transform":
        trs.fit(X, y)
        trs.transform(X, sample_weight=sample_weight, metadata=metadata)
    else:
        getattr(trs, method)(X, y, sample_weight=sample_weight, metadata=metadata)

    assert len(registry)
    for _trs in registry:
        check_recorded_metadata(
            obj=_trs, method=method, sample_weight=sample_weight, metadata=metadata
        )


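# [Editor's illustrative sketch -- not part of the original test file.] Outside
# the test fixtures, metadata routing is enabled via `sklearn.config_context`,
# and a sub-estimator only receives metadata it explicitly requested with
# `set_fit_request` / `set_transform_request`. `_sketch_metadata_routing` is an
# invented name.
def _sketch_metadata_routing():
    import sklearn

    with sklearn.config_context(enable_metadata_routing=True):
        scaler = StandardScaler().set_fit_request(sample_weight=True)
        ct = ColumnTransformer([("scale", scaler, [0])])
        # sample_weight is routed through to StandardScaler.fit
        ct.fit([[0.0], [1.0], [2.0]], sample_weight=[1.0, 2.0, 1.0])

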
2484 |
+
@pytest.mark.usefixtures("enable_slep006")
|
2485 |
+
def test_metadata_routing_no_fit_transform():
|
2486 |
+
"""Test metadata routing when the sub-estimator doesn't implement
|
2487 |
+
``fit_transform``."""
|
2488 |
+
|
2489 |
+
class NoFitTransform(BaseEstimator):
|
2490 |
+
def fit(self, X, y=None, sample_weight=None, metadata=None):
|
2491 |
+
assert sample_weight
|
2492 |
+
assert metadata
|
2493 |
+
return self
|
2494 |
+
|
2495 |
+
def transform(self, X, sample_weight=None, metadata=None):
|
2496 |
+
assert sample_weight
|
2497 |
+
assert metadata
|
2498 |
+
return X
|
2499 |
+
|
2500 |
+
X = np.array([[0, 1, 2], [2, 4, 6]]).T
|
2501 |
+
y = [1, 2, 3]
|
2502 |
+
_Registry()
|
2503 |
+
sample_weight, metadata = [1], "a"
|
2504 |
+
trs = ColumnTransformer(
|
2505 |
+
[
|
2506 |
+
(
|
2507 |
+
"trans",
|
2508 |
+
NoFitTransform()
|
2509 |
+
.set_fit_request(sample_weight=True, metadata=True)
|
2510 |
+
.set_transform_request(sample_weight=True, metadata=True),
|
2511 |
+
[0],
|
2512 |
+
)
|
2513 |
+
]
|
2514 |
+
)
|
2515 |
+
|
2516 |
+
trs.fit(X, y, sample_weight=sample_weight, metadata=metadata)
|
2517 |
+
trs.fit_transform(X, y, sample_weight=sample_weight, metadata=metadata)
|
2518 |
+
|
2519 |
+
|
@pytest.mark.usefixtures("enable_slep006")
@pytest.mark.parametrize("method", ["transform", "fit_transform", "fit"])
def test_metadata_routing_error_for_column_transformer(method):
    """Test that the right error is raised when metadata is not requested."""
    X = np.array([[0, 1, 2], [2, 4, 6]]).T
    y = [1, 2, 3]
    sample_weight, metadata = [1], "a"
    trs = ColumnTransformer([("trans", ConsumingTransformer(), [0])])

    error_message = (
        "[sample_weight, metadata] are passed but are not explicitly set as requested"
        f" or not for ConsumingTransformer.{method}"
    )
    with pytest.raises(ValueError, match=re.escape(error_message)):
        if method == "transform":
            trs.fit(X, y)
            trs.transform(X, sample_weight=sample_weight, metadata=metadata)
        else:
            getattr(trs, method)(X, y, sample_weight=sample_weight, metadata=metadata)


@pytest.mark.usefixtures("enable_slep006")
def test_get_metadata_routing_works_without_fit():
    # Regression test for https://github.com/scikit-learn/scikit-learn/issues/28186
    # Make sure ct.get_metadata_routing() works w/o having called fit.
    ct = ColumnTransformer([("trans", ConsumingTransformer(), [0])])
    ct.get_metadata_routing()


@pytest.mark.usefixtures("enable_slep006")
def test_remainder_request_always_present():
    # Test that remainder request is always present.
    ct = ColumnTransformer(
        [("trans", StandardScaler(), [0])],
        remainder=ConsumingTransformer()
        .set_fit_request(metadata=True)
        .set_transform_request(metadata=True),
    )
    router = ct.get_metadata_routing()
    assert router.consumes("fit", ["metadata"]) == set(["metadata"])


@pytest.mark.usefixtures("enable_slep006")
def test_unused_transformer_request_present():
    # Test that the request of a transformer is always present even when not
    # used due to no selected columns.
    ct = ColumnTransformer(
        [
            (
                "trans",
                ConsumingTransformer()
                .set_fit_request(metadata=True)
                .set_transform_request(metadata=True),
                lambda X: [],
            )
        ]
    )
    router = ct.get_metadata_routing()
    assert router.consumes("fit", ["metadata"]) == set(["metadata"])


# End of Metadata Routing Tests
# =============================
env-llmeval/lib/python3.10/site-packages/sklearn/compose/tests/test_target.py
ADDED
@@ -0,0 +1,387 @@
import numpy as np
import pytest

from sklearn import datasets
from sklearn.base import BaseEstimator, TransformerMixin, clone
from sklearn.compose import TransformedTargetRegressor
from sklearn.dummy import DummyRegressor
from sklearn.linear_model import LinearRegression, OrthogonalMatchingPursuit
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import FunctionTransformer, StandardScaler
from sklearn.utils._testing import assert_allclose, assert_no_warnings

friedman = datasets.make_friedman1(random_state=0)


def test_transform_target_regressor_error():
    X, y = friedman
    # provide a transformer and functions at the same time
    regr = TransformedTargetRegressor(
        regressor=LinearRegression(),
        transformer=StandardScaler(),
        func=np.exp,
        inverse_func=np.log,
    )
    with pytest.raises(
        ValueError,
        match="'transformer' and functions 'func'/'inverse_func' cannot both be set.",
    ):
        regr.fit(X, y)
    # fit with sample_weight with a regressor which does not support it
    sample_weight = np.ones((y.shape[0],))
    regr = TransformedTargetRegressor(
        regressor=OrthogonalMatchingPursuit(), transformer=StandardScaler()
    )
    with pytest.raises(
        TypeError,
        match=r"fit\(\) got an unexpected " "keyword argument 'sample_weight'",
    ):
        regr.fit(X, y, sample_weight=sample_weight)
    # func is given but inverse_func is not
    regr = TransformedTargetRegressor(func=np.exp)
    with pytest.raises(
        ValueError,
        match="When 'func' is provided, 'inverse_func' must also be provided",
    ):
        regr.fit(X, y)


def test_transform_target_regressor_invertible():
    X, y = friedman
    regr = TransformedTargetRegressor(
        regressor=LinearRegression(),
        func=np.sqrt,
        inverse_func=np.log,
        check_inverse=True,
    )
    with pytest.warns(
        UserWarning,
        match=(
            "The provided functions or"
            " transformer are not strictly inverse of each other."
        ),
    ):
        regr.fit(X, y)
    regr = TransformedTargetRegressor(
        regressor=LinearRegression(), func=np.sqrt, inverse_func=np.log
    )
    regr.set_params(check_inverse=False)
    assert_no_warnings(regr.fit, X, y)


def _check_standard_scaled(y, y_pred):
    y_mean = np.mean(y, axis=0)
    y_std = np.std(y, axis=0)
    assert_allclose((y - y_mean) / y_std, y_pred)


def _check_shifted_by_one(y, y_pred):
    assert_allclose(y + 1, y_pred)


def test_transform_target_regressor_functions():
    X, y = friedman
    regr = TransformedTargetRegressor(
        regressor=LinearRegression(), func=np.log, inverse_func=np.exp
    )
    y_pred = regr.fit(X, y).predict(X)
    # check the transformer output
    y_tran = regr.transformer_.transform(y.reshape(-1, 1)).squeeze()
    assert_allclose(np.log(y), y_tran)
    assert_allclose(
        y, regr.transformer_.inverse_transform(y_tran.reshape(-1, 1)).squeeze()
    )
    assert y.shape == y_pred.shape
    assert_allclose(y_pred, regr.inverse_func(regr.regressor_.predict(X)))
    # check the regressor output
    lr = LinearRegression().fit(X, regr.func(y))
    assert_allclose(regr.regressor_.coef_.ravel(), lr.coef_.ravel())


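# [Editor's illustrative sketch -- not part of the original test file.] The
# func/inverse_func pair means the regressor is trained on func(y) and its
# predictions are mapped back through inverse_func, so predict() returns values
# on the original scale of y. `_sketch_log_target` is an invented name.
def _sketch_log_target():
    X = np.arange(1, 7, dtype=float).reshape(-1, 1)
    y = np.exp(X.ravel())  # log(y) is exactly linear in X
    regr = TransformedTargetRegressor(
        regressor=LinearRegression(), func=np.log, inverse_func=np.exp
    )
    y_pred = regr.fit(X, y).predict(X)
    assert_allclose(y_pred, y)

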
def test_transform_target_regressor_functions_multioutput():
    X = friedman[0]
    y = np.vstack((friedman[1], friedman[1] ** 2 + 1)).T
    regr = TransformedTargetRegressor(
        regressor=LinearRegression(), func=np.log, inverse_func=np.exp
    )
    y_pred = regr.fit(X, y).predict(X)
    # check the transformer output
    y_tran = regr.transformer_.transform(y)
    assert_allclose(np.log(y), y_tran)
    assert_allclose(y, regr.transformer_.inverse_transform(y_tran))
    assert y.shape == y_pred.shape
    assert_allclose(y_pred, regr.inverse_func(regr.regressor_.predict(X)))
    # check the regressor output
    lr = LinearRegression().fit(X, regr.func(y))
    assert_allclose(regr.regressor_.coef_.ravel(), lr.coef_.ravel())


@pytest.mark.parametrize(
    "X,y", [friedman, (friedman[0], np.vstack((friedman[1], friedman[1] ** 2 + 1)).T)]
)
def test_transform_target_regressor_1d_transformer(X, y):
    # All transformers in scikit-learn expect 2D data. FunctionTransformer with
    # validate=False lifts this constraint without checking that the input is a
    # 2D vector. We check the consistency of the data shape using a 1D and 2D y
    # array.
    transformer = FunctionTransformer(
        func=lambda x: x + 1, inverse_func=lambda x: x - 1
    )
    regr = TransformedTargetRegressor(
        regressor=LinearRegression(), transformer=transformer
    )
    y_pred = regr.fit(X, y).predict(X)
    assert y.shape == y_pred.shape
    # consistency forward transform
    y_tran = regr.transformer_.transform(y)
    _check_shifted_by_one(y, y_tran)
    assert y.shape == y_pred.shape
    # consistency inverse transform
    assert_allclose(y, regr.transformer_.inverse_transform(y_tran).squeeze())
    # consistency of the regressor
    lr = LinearRegression()
    transformer2 = clone(transformer)
    lr.fit(X, transformer2.fit_transform(y))
    y_lr_pred = lr.predict(X)
    assert_allclose(y_pred, transformer2.inverse_transform(y_lr_pred))
    assert_allclose(regr.regressor_.coef_, lr.coef_)


@pytest.mark.parametrize(
    "X,y", [friedman, (friedman[0], np.vstack((friedman[1], friedman[1] ** 2 + 1)).T)]
)
def test_transform_target_regressor_2d_transformer(X, y):
    # Check consistency with transformer accepting only 2D array and a 1D/2D y
    # array.
    transformer = StandardScaler()
    regr = TransformedTargetRegressor(
        regressor=LinearRegression(), transformer=transformer
    )
    y_pred = regr.fit(X, y).predict(X)
    assert y.shape == y_pred.shape
    # consistency forward transform
    if y.ndim == 1:  # create a 2D array and squeeze results
        y_tran = regr.transformer_.transform(y.reshape(-1, 1))
    else:
        y_tran = regr.transformer_.transform(y)
    _check_standard_scaled(y, y_tran.squeeze())
    assert y.shape == y_pred.shape
    # consistency inverse transform
    assert_allclose(y, regr.transformer_.inverse_transform(y_tran).squeeze())
    # consistency of the regressor
    lr = LinearRegression()
    transformer2 = clone(transformer)
    if y.ndim == 1:  # create a 2D array and squeeze results
        lr.fit(X, transformer2.fit_transform(y.reshape(-1, 1)).squeeze())
        y_lr_pred = lr.predict(X).reshape(-1, 1)
        y_pred2 = transformer2.inverse_transform(y_lr_pred).squeeze()
    else:
        lr.fit(X, transformer2.fit_transform(y))
        y_lr_pred = lr.predict(X)
        y_pred2 = transformer2.inverse_transform(y_lr_pred)

    assert_allclose(y_pred, y_pred2)
    assert_allclose(regr.regressor_.coef_, lr.coef_)


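# [Editor's illustrative sketch -- not part of the original test file.]
# StandardScaler only accepts 2D input, so a 1D target must be reshaped to a
# column before transforming and squeezed afterwards -- the pattern encoded in
# the y.ndim == 1 branches above. `_sketch_scale_1d_target` is an invented name.
def _sketch_scale_1d_target():
    y = np.array([1.0, 2.0, 3.0])
    y_tran = StandardScaler().fit_transform(y.reshape(-1, 1)).squeeze()
    assert y_tran.shape == y.shape

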
def test_transform_target_regressor_2d_transformer_multioutput():
    # Check consistency with transformer accepting only 2D array and a 2D y
    # array.
    X = friedman[0]
    y = np.vstack((friedman[1], friedman[1] ** 2 + 1)).T
    transformer = StandardScaler()
    regr = TransformedTargetRegressor(
        regressor=LinearRegression(), transformer=transformer
    )
    y_pred = regr.fit(X, y).predict(X)
    assert y.shape == y_pred.shape
    # consistency forward transform
    y_tran = regr.transformer_.transform(y)
    _check_standard_scaled(y, y_tran)
    assert y.shape == y_pred.shape
    # consistency inverse transform
    assert_allclose(y, regr.transformer_.inverse_transform(y_tran).squeeze())
    # consistency of the regressor
    lr = LinearRegression()
    transformer2 = clone(transformer)
    lr.fit(X, transformer2.fit_transform(y))
    y_lr_pred = lr.predict(X)
    assert_allclose(y_pred, transformer2.inverse_transform(y_lr_pred))
    assert_allclose(regr.regressor_.coef_, lr.coef_)


def test_transform_target_regressor_3d_target():
    # Non-regression test for:
    # https://github.com/scikit-learn/scikit-learn/issues/18866
    # Check with a 3D target with a transformer that reshapes the target
    X = friedman[0]
    y = np.tile(friedman[1].reshape(-1, 1, 1), [1, 3, 2])

    def flatten_data(data):
        return data.reshape(data.shape[0], -1)

    def unflatten_data(data):
        return data.reshape(data.shape[0], -1, 2)

    transformer = FunctionTransformer(func=flatten_data, inverse_func=unflatten_data)
    regr = TransformedTargetRegressor(
        regressor=LinearRegression(), transformer=transformer
    )
    y_pred = regr.fit(X, y).predict(X)
    assert y.shape == y_pred.shape


234 |
+
def test_transform_target_regressor_multi_to_single():
|
235 |
+
X = friedman[0]
|
236 |
+
y = np.transpose([friedman[1], (friedman[1] ** 2 + 1)])
|
237 |
+
|
238 |
+
def func(y):
|
239 |
+
out = np.sqrt(y[:, 0] ** 2 + y[:, 1] ** 2)
|
240 |
+
return out[:, np.newaxis]
|
241 |
+
|
242 |
+
def inverse_func(y):
|
243 |
+
return y
|
244 |
+
|
245 |
+
tt = TransformedTargetRegressor(
|
246 |
+
func=func, inverse_func=inverse_func, check_inverse=False
|
247 |
+
)
|
248 |
+
tt.fit(X, y)
|
249 |
+
y_pred_2d_func = tt.predict(X)
|
250 |
+
assert y_pred_2d_func.shape == (100, 1)
|
251 |
+
|
252 |
+
# force that the function only return a 1D array
|
253 |
+
def func(y):
|
254 |
+
return np.sqrt(y[:, 0] ** 2 + y[:, 1] ** 2)
|
255 |
+
|
256 |
+
tt = TransformedTargetRegressor(
|
257 |
+
func=func, inverse_func=inverse_func, check_inverse=False
|
258 |
+
)
|
259 |
+
tt.fit(X, y)
|
260 |
+
y_pred_1d_func = tt.predict(X)
|
261 |
+
assert y_pred_1d_func.shape == (100, 1)
|
262 |
+
|
263 |
+
assert_allclose(y_pred_1d_func, y_pred_2d_func)
|
264 |
+
|
265 |
+
|
266 |
+
class DummyCheckerArrayTransformer(TransformerMixin, BaseEstimator):
|
267 |
+
def fit(self, X, y=None):
|
268 |
+
assert isinstance(X, np.ndarray)
|
269 |
+
return self
|
270 |
+
|
271 |
+
def transform(self, X):
|
272 |
+
assert isinstance(X, np.ndarray)
|
273 |
+
return X
|
274 |
+
|
275 |
+
def inverse_transform(self, X):
|
276 |
+
assert isinstance(X, np.ndarray)
|
277 |
+
return X
|
278 |
+
|
279 |
+
|
280 |
+
class DummyCheckerListRegressor(DummyRegressor):
|
281 |
+
def fit(self, X, y, sample_weight=None):
|
282 |
+
assert isinstance(X, list)
|
283 |
+
return super().fit(X, y, sample_weight)
|
284 |
+
|
285 |
+
def predict(self, X):
|
286 |
+
assert isinstance(X, list)
|
287 |
+
return super().predict(X)
|
288 |
+
|
289 |
+
|
290 |
+
def test_transform_target_regressor_ensure_y_array():
|
291 |
+
# check that the target ``y`` passed to the transformer will always be a
|
292 |
+
# numpy array. Similarly, if ``X`` is passed as a list, we check that the
|
293 |
+
# predictor receive as it is.
|
294 |
+
X, y = friedman
|
295 |
+
tt = TransformedTargetRegressor(
|
296 |
+
transformer=DummyCheckerArrayTransformer(),
|
297 |
+
regressor=DummyCheckerListRegressor(),
|
298 |
+
check_inverse=False,
|
299 |
+
)
|
300 |
+
tt.fit(X.tolist(), y.tolist())
|
301 |
+
tt.predict(X.tolist())
|
302 |
+
with pytest.raises(AssertionError):
|
303 |
+
tt.fit(X, y.tolist())
|
304 |
+
with pytest.raises(AssertionError):
|
305 |
+
tt.predict(X)
|
306 |
+
|
307 |
+
|
308 |
+
class DummyTransformer(TransformerMixin, BaseEstimator):
|
309 |
+
"""Dummy transformer which count how many time fit was called."""
|
310 |
+
|
311 |
+
def __init__(self, fit_counter=0):
|
312 |
+
self.fit_counter = fit_counter
|
313 |
+
|
314 |
+
def fit(self, X, y=None):
|
315 |
+
self.fit_counter += 1
|
316 |
+
return self
|
317 |
+
|
318 |
+
def transform(self, X):
|
319 |
+
return X
|
320 |
+
|
321 |
+
def inverse_transform(self, X):
|
322 |
+
return X
|
323 |
+
|
324 |
+
|
325 |
+
@pytest.mark.parametrize("check_inverse", [False, True])
|
326 |
+
def test_transform_target_regressor_count_fit(check_inverse):
|
327 |
+
# regression test for gh-issue #11618
|
328 |
+
# check that we only call a single time fit for the transformer
|
329 |
+
X, y = friedman
|
330 |
+
ttr = TransformedTargetRegressor(
|
331 |
+
transformer=DummyTransformer(), check_inverse=check_inverse
|
332 |
+
)
|
333 |
+
ttr.fit(X, y)
|
334 |
+
assert ttr.transformer_.fit_counter == 1
|
335 |
+
|
336 |
+
|
337 |
+
class DummyRegressorWithExtraFitParams(DummyRegressor):
|
338 |
+
def fit(self, X, y, sample_weight=None, check_input=True):
|
339 |
+
# on the test below we force this to false, we make sure this is
|
340 |
+
# actually passed to the regressor
|
341 |
+
assert not check_input
|
342 |
+
return super().fit(X, y, sample_weight)
|
343 |
+
|
344 |
+
|
345 |
+
def test_transform_target_regressor_pass_fit_parameters():
|
346 |
+
X, y = friedman
|
347 |
+
regr = TransformedTargetRegressor(
|
348 |
+
regressor=DummyRegressorWithExtraFitParams(), transformer=DummyTransformer()
|
349 |
+
)
|
350 |
+
|
351 |
+
regr.fit(X, y, check_input=False)
|
352 |
+
assert regr.transformer_.fit_counter == 1
|
353 |
+
|
354 |
+
|
355 |
+
def test_transform_target_regressor_route_pipeline():
|
356 |
+
X, y = friedman
|
357 |
+
|
358 |
+
regr = TransformedTargetRegressor(
|
359 |
+
regressor=DummyRegressorWithExtraFitParams(), transformer=DummyTransformer()
|
360 |
+
)
|
361 |
+
estimators = [("normalize", StandardScaler()), ("est", regr)]
|
362 |
+
|
363 |
+
pip = Pipeline(estimators)
|
364 |
+
pip.fit(X, y, **{"est__check_input": False})
|
365 |
+
|
366 |
+
assert regr.transformer_.fit_counter == 1
|
367 |
+
|
368 |
+
|
369 |
+
class DummyRegressorWithExtraPredictParams(DummyRegressor):
|
370 |
+
def predict(self, X, check_input=True):
|
371 |
+
# In the test below we make sure that the check input parameter is
|
372 |
+
# passed as false
|
373 |
+
self.predict_called = True
|
374 |
+
assert not check_input
|
375 |
+
return super().predict(X)
|
376 |
+
|
377 |
+
|
378 |
+
def test_transform_target_regressor_pass_extra_predict_parameters():
|
379 |
+
# Checks that predict kwargs are passed to regressor.
|
380 |
+
X, y = friedman
|
381 |
+
regr = TransformedTargetRegressor(
|
382 |
+
regressor=DummyRegressorWithExtraPredictParams(), transformer=DummyTransformer()
|
383 |
+
)
|
384 |
+
|
385 |
+
regr.fit(X, y)
|
386 |
+
regr.predict(X, check_input=False)
|
387 |
+
assert regr.regressor_.predict_called
|
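Taken together, the tests above pin down the public contract of TransformedTargetRegressor: the target is transformed before the underlying regressor is fitted, and predictions are mapped back through the inverse transform. As a reading aid, here is a minimal usage sketch of the two configurations the tests exercise (synthetic, illustrative data; it only assumes numpy and scikit-learn are importable):

import numpy as np
from sklearn.compose import TransformedTargetRegressor
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import StandardScaler

rng = np.random.RandomState(0)
X = rng.uniform(size=(100, 4))
y = 50.0 * X[:, 0] + 10.0  # target on a shifted, scaled scale

# Transformer-based configuration, as in the StandardScaler tests above;
# a 1D y is reshaped to 2D for the transformer and squeezed back.
regr = TransformedTargetRegressor(
    regressor=LinearRegression(), transformer=StandardScaler()
)
y_pred = regr.fit(X, y).predict(X)
assert y_pred.shape == y.shape

# Function-based configuration, as in the func/inverse_func tests above.
regr_log = TransformedTargetRegressor(
    regressor=LinearRegression(), func=np.log1p, inverse_func=np.expm1
)
regr_log.fit(X, y)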
env-llmeval/lib/python3.10/site-packages/sklearn/datasets/data/diabetes_data_raw.csv.gz
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a3e94cc7cea00f8a84fa5f6345203913a68efa42df18f87ddf9bead721bfd503
+size 7105
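Each of the compressed data files in this part of the diff is checked in as a Git LFS pointer rather than as raw bytes: a three-line text stub recording the LFS spec version, the SHA-256 object id, and the size in bytes of the real content. A minimal sketch of reading such a stub (parse_lfs_pointer is an illustrative helper written for this note, not a library function):

def parse_lfs_pointer(text):
    """Split the 'key value' lines of a Git LFS pointer into a dict."""
    fields = {}
    for line in text.strip().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

pointer = (
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:a3e94cc7cea00f8a84fa5f6345203913a68efa42df18f87ddf9bead721bfd503\n"
    "size 7105\n"
)
fields = parse_lfs_pointer(pointer)
assert fields["size"] == "7105"  # size of the real object, in bytes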
env-llmeval/lib/python3.10/site-packages/sklearn/datasets/data/diabetes_target.csv.gz
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8e53f65eb811df43c206f3534bb3af0e5fed213bc37ed6ba36310157d6023803
+size 1050

env-llmeval/lib/python3.10/site-packages/sklearn/datasets/data/digits.csv.gz
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:09f66e6debdee2cd2b5ae59e0d6abbb73fc2b0e0185d2e1957e9ebb51e23aa22
+size 57523

env-llmeval/lib/python3.10/site-packages/sklearn/datasets/images/china.jpg
ADDED
Git LFS image (preview omitted)

env-llmeval/lib/python3.10/site-packages/sklearn/datasets/images/flower.jpg
ADDED
Git LFS image (preview omitted)

env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1590/api-v1-jd-1590.json.gz
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9b105adfedc6b6b82f4695ca9bfe232393034cdf79803523f397a6dc5bf824d1
+size 1544

env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1590/api-v1-jdf-1590.json.gz
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:046f5e60564693f0f3b8e382725c8012c3e058647139c24685cec984e40fcd00
+size 1032

env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1590/api-v1-jdq-1590.json.gz
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:44b9b0d290a1e339695a431438f84080071c5635161c3977dd17f4c27b00a34a
+size 1507

env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1590/data-v1-dl-1595261.arff.gz
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ee1dcdf58f2f1072f7dd1b43388969c51bc6cfe776e3e9465ae6a756e5ddb10a
+size 1152

env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_292/api-v1-jd-40981.json.gz
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c26dcbe30cfb39161f305b2b3d43a9b50adc8b368d0749568c47106cbdb20897
+size 553

env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40589/api-v1-jdl-dn-emotions-l-2-s-act-.json.gz
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4da63a60163340b6e18922abfe7f1f2a7a7da23da63c269324985d61ffaa6075
+size 318

env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40945/api-v1-jdf-40945.json.gz
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:95f0938dfdf1b87d0ffc4d526f2c91e097ef7689480b693970126d908f291030
+size 320

env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_42074/api-v1-jd-42074.json.gz
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f443b3add7375ca92ece9296b8449a0780305d3b5210c84994bdeab36271d62a
+size 584

env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_42074/api-v1-jdf-42074.json.gz
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:38b74e7f02a61ff55bcfac4d87103d5bffc43febb0c019d9aaa162f8f7693068
+size 272

env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_42074/api-v1-jdq-42074.json.gz
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8742a74bd5bc120acd9186c8a8737cb420ed9b009fade00b24e7ce5217797f2c
+size 722

env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_42074/data-v1-dl-21552912.arff.gz
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f623e777c0a36ae6c82fae10a7c2088cb383298ea244595bf8dc95449c9be4c4
+size 2326

env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_561/api-v1-jd-561.json.gz
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a1d38fdd601b67bb9c6d16152f53ddf166a0cfcfef4fa86438e899bfe449226c
+size 1798

env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_561/api-v1-jdf-561.json.gz
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:407424fb79cc30b8e9ff90900b3bf29244ac7f3797f278b5be602843f959b4ee
+size 425

env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_561/api-v1-jdl-dn-cpu-l-2-dv-1.json.gz
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0703b0ae20b9ff75087dc601640ee58f1c2ad6768858ea21a245151da9ba8e4c
+size 301

env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_561/api-v1-jdl-dn-cpu-l-2-s-act-.json.gz
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:70d4596ad879547863109da8675c2b789d07df66b526d7ebcbce9616c4c9b94c
+size 347

env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_561/api-v1-jdq-561.json.gz
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8743b2d93d2c62a82fb47e1fbc002b97e25adcfb5bf1fcb26b58ad0bed15bd48
+size 1074

env-llmeval/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_561/data-v1-dl-52739.arff.gz
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e96142b5e00dfec2617b0c22d7192b340ae2c28ec3ffc3a894c5be746b970a59
+size 3303
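The openml entries above are gzip-compressed fixtures that scikit-learn's dataset tests use in place of live OpenML API responses and ARFF downloads. Inspecting one of the JSON fixtures needs nothing beyond the standard library; a sketch, with the path as a placeholder for any of the *.json.gz entries listed:

import gzip
import json

# Placeholder path; substitute any *.json.gz fixture from the listing above.
fixture = "sklearn/datasets/tests/data/openml/id_1590/api-v1-jd-1590.json.gz"
with gzip.open(fixture, "rt", encoding="utf-8") as f:
    payload = json.load(f)  # the decompressed fixture is a JSON document
print(sorted(payload))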
env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (1.31 kB)

env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_base.cpython-310.pyc
ADDED
Binary file (5.75 kB)

env-llmeval/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_dict_learning.cpython-310.pyc
ADDED
Binary file (62.1 kB)