Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
- env-llmeval/lib/python3.10/site-packages/sklearn/preprocessing/__init__.py +63 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/preprocessing/_csr_polynomial_expansion.cpython-310-x86_64-linux-gnu.so +0 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/preprocessing/_data.py +0 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/preprocessing/_discretization.py +472 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/preprocessing/_encoders.py +1678 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/preprocessing/_function_transformer.py +431 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/preprocessing/_label.py +951 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/preprocessing/_polynomial.py +1172 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/preprocessing/_target_encoder.py +531 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/preprocessing/_target_encoder_fast.cpython-310-x86_64-linux-gnu.so +0 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/preprocessing/tests/__init__.py +0 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/preprocessing/tests/test_data.py +2593 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/preprocessing/tests/test_discretization.py +503 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/preprocessing/tests/test_function_transformer.py +591 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/preprocessing/tests/test_polynomial.py +1258 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/preprocessing/tests/test_target_encoder.py +716 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_arpack.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_array_api.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_arrayfuncs.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_bunch.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_class_weight.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_cython_blas.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_cython_templating.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_deprecation.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_encode.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_estimator_checks.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_estimator_html_repr.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_extmath.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_fast_dict.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_fixes.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_graph.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_metaestimators.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_mocking.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_multiclass.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_murmurhash.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_optimize.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_parallel.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_param_validation.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_plotting.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_pprint.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_random.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_response.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_seq_dataset.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_set_output.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_shortest_path.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_show_versions.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_sparsefuncs.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_stats.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_tags.cpython-310.pyc +0 -0
env-llmeval/lib/python3.10/site-packages/sklearn/preprocessing/__init__.py
ADDED
@@ -0,0 +1,63 @@
"""
The :mod:`sklearn.preprocessing` module includes scaling, centering,
normalization, binarization methods.
"""

from ._data import (
    Binarizer,
    KernelCenterer,
    MaxAbsScaler,
    MinMaxScaler,
    Normalizer,
    PowerTransformer,
    QuantileTransformer,
    RobustScaler,
    StandardScaler,
    add_dummy_feature,
    binarize,
    maxabs_scale,
    minmax_scale,
    normalize,
    power_transform,
    quantile_transform,
    robust_scale,
    scale,
)
from ._discretization import KBinsDiscretizer
from ._encoders import OneHotEncoder, OrdinalEncoder
from ._function_transformer import FunctionTransformer
from ._label import LabelBinarizer, LabelEncoder, MultiLabelBinarizer, label_binarize
from ._polynomial import PolynomialFeatures, SplineTransformer
from ._target_encoder import TargetEncoder

__all__ = [
    "Binarizer",
    "FunctionTransformer",
    "KBinsDiscretizer",
    "KernelCenterer",
    "LabelBinarizer",
    "LabelEncoder",
    "MultiLabelBinarizer",
    "MinMaxScaler",
    "MaxAbsScaler",
    "QuantileTransformer",
    "Normalizer",
    "OneHotEncoder",
    "OrdinalEncoder",
    "PowerTransformer",
    "RobustScaler",
    "SplineTransformer",
    "StandardScaler",
    "TargetEncoder",
    "add_dummy_feature",
    "PolynomialFeatures",
    "binarize",
    "normalize",
    "scale",
    "robust_scale",
    "maxabs_scale",
    "minmax_scale",
    "label_binarize",
    "quantile_transform",
    "power_transform",
]
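The `__init__.py` above only re-exports public names. For orientation, a minimal usage sketch of two of the re-exported classes (standard scikit-learn API; the data values here are made up for illustration):

import numpy as np
from sklearn.preprocessing import MinMaxScaler, OneHotEncoder

# Scale a numeric column into [0, 1]
X_num = np.array([[1.0], [5.0], [9.0]])
print(MinMaxScaler().fit_transform(X_num).ravel())  # [0.  0.5 1. ]

# One-hot encode a categorical column (categories are sorted: green, red)
X_cat = np.array([["red"], ["green"], ["red"]])
enc = OneHotEncoder(sparse_output=False).fit(X_cat)
print(enc.categories_)       # [array(['green', 'red'], dtype='<U5')]
print(enc.transform(X_cat))  # [[0. 1.], [1. 0.], [0. 1.]]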
env-llmeval/lib/python3.10/site-packages/sklearn/preprocessing/_csr_polynomial_expansion.cpython-310-x86_64-linux-gnu.so
ADDED
Binary file (492 kB).
env-llmeval/lib/python3.10/site-packages/sklearn/preprocessing/_data.py
ADDED
The diff for this file is too large to render.
env-llmeval/lib/python3.10/site-packages/sklearn/preprocessing/_discretization.py
ADDED
@@ -0,0 +1,472 @@
# Author: Henry Lin <[email protected]>
#         Tom Dupré la Tour

# License: BSD


import warnings
from numbers import Integral

import numpy as np

from ..base import BaseEstimator, TransformerMixin, _fit_context
from ..utils import _safe_indexing
from ..utils._param_validation import Hidden, Interval, Options, StrOptions
from ..utils.stats import _weighted_percentile
from ..utils.validation import (
    _check_feature_names_in,
    _check_sample_weight,
    check_array,
    check_is_fitted,
    check_random_state,
)
from ._encoders import OneHotEncoder


class KBinsDiscretizer(TransformerMixin, BaseEstimator):
    """
    Bin continuous data into intervals.

    Read more in the :ref:`User Guide <preprocessing_discretization>`.

    .. versionadded:: 0.20

    Parameters
    ----------
    n_bins : int or array-like of shape (n_features,), default=5
        The number of bins to produce. Raises ValueError if ``n_bins < 2``.

    encode : {'onehot', 'onehot-dense', 'ordinal'}, default='onehot'
        Method used to encode the transformed result.

        - 'onehot': Encode the transformed result with one-hot encoding
          and return a sparse matrix. Ignored features are always
          stacked to the right.
        - 'onehot-dense': Encode the transformed result with one-hot encoding
          and return a dense array. Ignored features are always
          stacked to the right.
        - 'ordinal': Return the bin identifier encoded as an integer value.

    strategy : {'uniform', 'quantile', 'kmeans'}, default='quantile'
        Strategy used to define the widths of the bins.

        - 'uniform': All bins in each feature have identical widths.
        - 'quantile': All bins in each feature have the same number of points.
        - 'kmeans': Values in each bin have the same nearest center of a 1D
          k-means cluster.

        For an example of the different strategies see:
        :ref:`sphx_glr_auto_examples_preprocessing_plot_discretization_strategies.py`.

    dtype : {np.float32, np.float64}, default=None
        The desired data-type for the output. If None, output dtype is
        consistent with input dtype. Only np.float32 and np.float64 are
        supported.

        .. versionadded:: 0.24

    subsample : int or None, default='warn'
        Maximum number of samples, used to fit the model, for computational
        efficiency. Defaults to 200_000 when `strategy='quantile'` and to `None`
        when `strategy='uniform'` or `strategy='kmeans'`.
        `subsample=None` means that all the training samples are used when
        computing the quantiles that determine the binning thresholds.
        Since quantile computation relies on sorting each column of `X` and
        that sorting has an `n log(n)` time complexity,
        it is recommended to use subsampling on datasets with a
        very large number of samples.

        .. versionchanged:: 1.3
            The default value of `subsample` changed from `None` to `200_000` when
            `strategy="quantile"`.

        .. versionchanged:: 1.5
            The default value of `subsample` changed from `None` to `200_000` when
            `strategy="uniform"` or `strategy="kmeans"`.

    random_state : int, RandomState instance or None, default=None
        Determines random number generation for subsampling.
        Pass an int for reproducible results across multiple function calls.
        See the `subsample` parameter for more details.
        See :term:`Glossary <random_state>`.

        .. versionadded:: 1.1

    Attributes
    ----------
    bin_edges_ : ndarray of ndarray of shape (n_features,)
        The edges of each bin. Contain arrays of varying shapes ``(n_bins_, )``
        Ignored features will have empty arrays.

    n_bins_ : ndarray of shape (n_features,), dtype=np.int64
        Number of bins per feature. Bins whose width are too small
        (i.e., <= 1e-8) are removed with a warning.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    See Also
    --------
    Binarizer : Class used to bin values as ``0`` or
        ``1`` based on a parameter ``threshold``.

    Notes
    -----

    For a visualization of discretization on different datasets refer to
    :ref:`sphx_glr_auto_examples_preprocessing_plot_discretization_classification.py`.
    On the effect of discretization on linear models see:
    :ref:`sphx_glr_auto_examples_preprocessing_plot_discretization.py`.

    In bin edges for feature ``i``, the first and last values are used only for
    ``inverse_transform``. During transform, bin edges are extended to::

      np.concatenate([-np.inf, bin_edges_[i][1:-1], np.inf])

    You can combine ``KBinsDiscretizer`` with
    :class:`~sklearn.compose.ColumnTransformer` if you only want to preprocess
    part of the features.

    ``KBinsDiscretizer`` might produce constant features (e.g., when
    ``encode = 'onehot'`` and certain bins do not contain any data).
    These features can be removed with feature selection algorithms
    (e.g., :class:`~sklearn.feature_selection.VarianceThreshold`).

    Examples
    --------
    >>> from sklearn.preprocessing import KBinsDiscretizer
    >>> X = [[-2, 1, -4,   -1],
    ...      [-1, 2, -3, -0.5],
    ...      [ 0, 3, -2,  0.5],
    ...      [ 1, 4, -1,    2]]
    >>> est = KBinsDiscretizer(
    ...     n_bins=3, encode='ordinal', strategy='uniform', subsample=None
    ... )
    >>> est.fit(X)
    KBinsDiscretizer(...)
    >>> Xt = est.transform(X)
    >>> Xt  # doctest: +SKIP
    array([[ 0., 0., 0., 0.],
           [ 1., 1., 1., 0.],
           [ 2., 2., 2., 1.],
           [ 2., 2., 2., 2.]])

    Sometimes it may be useful to convert the data back into the original
    feature space. The ``inverse_transform`` function converts the binned
    data into the original feature space. Each value will be equal to the mean
    of the two bin edges.

    >>> est.bin_edges_[0]
    array([-2., -1.,  0.,  1.])
    >>> est.inverse_transform(Xt)
    array([[-1.5,  1.5, -3.5, -0.5],
           [-0.5,  2.5, -2.5, -0.5],
           [ 0.5,  3.5, -1.5,  0.5],
           [ 0.5,  3.5, -1.5,  1.5]])
    """

    _parameter_constraints: dict = {
        "n_bins": [Interval(Integral, 2, None, closed="left"), "array-like"],
        "encode": [StrOptions({"onehot", "onehot-dense", "ordinal"})],
        "strategy": [StrOptions({"uniform", "quantile", "kmeans"})],
        "dtype": [Options(type, {np.float64, np.float32}), None],
        "subsample": [
            Interval(Integral, 1, None, closed="left"),
            None,
            Hidden(StrOptions({"warn"})),
        ],
        "random_state": ["random_state"],
    }

    def __init__(
        self,
        n_bins=5,
        *,
        encode="onehot",
        strategy="quantile",
        dtype=None,
        subsample="warn",
        random_state=None,
    ):
        self.n_bins = n_bins
        self.encode = encode
        self.strategy = strategy
        self.dtype = dtype
        self.subsample = subsample
        self.random_state = random_state

    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, X, y=None, sample_weight=None):
        """
        Fit the estimator.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Data to be discretized.

        y : None
            Ignored. This parameter exists only for compatibility with
            :class:`~sklearn.pipeline.Pipeline`.

        sample_weight : ndarray of shape (n_samples,)
            Contains weight values to be associated with each sample.
            Only possible when `strategy` is set to `"quantile"`.

            .. versionadded:: 1.3

        Returns
        -------
        self : object
            Returns the instance itself.
        """
        X = self._validate_data(X, dtype="numeric")

        if self.dtype in (np.float64, np.float32):
            output_dtype = self.dtype
        else:  # self.dtype is None
            output_dtype = X.dtype

        n_samples, n_features = X.shape

        if sample_weight is not None and self.strategy == "uniform":
            raise ValueError(
                "`sample_weight` was provided but it cannot be "
                "used with strategy='uniform'. Got strategy="
                f"{self.strategy!r} instead."
            )

        if self.strategy in ("uniform", "kmeans") and self.subsample == "warn":
            warnings.warn(
                (
                    "In version 1.5 onwards, subsample=200_000 "
                    "will be used by default. Set subsample explicitly to "
                    "silence this warning in the mean time. Set "
                    "subsample=None to disable subsampling explicitly."
                ),
                FutureWarning,
            )

        subsample = self.subsample
        if subsample == "warn":
            subsample = 200000 if self.strategy == "quantile" else None
        if subsample is not None and n_samples > subsample:
            rng = check_random_state(self.random_state)
            subsample_idx = rng.choice(n_samples, size=subsample, replace=False)
            X = _safe_indexing(X, subsample_idx)

        n_features = X.shape[1]
        n_bins = self._validate_n_bins(n_features)

        if sample_weight is not None:
            sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)

        bin_edges = np.zeros(n_features, dtype=object)
        for jj in range(n_features):
            column = X[:, jj]
            col_min, col_max = column.min(), column.max()

            if col_min == col_max:
                warnings.warn(
                    "Feature %d is constant and will be replaced with 0." % jj
                )
                n_bins[jj] = 1
                bin_edges[jj] = np.array([-np.inf, np.inf])
                continue

            if self.strategy == "uniform":
                bin_edges[jj] = np.linspace(col_min, col_max, n_bins[jj] + 1)

            elif self.strategy == "quantile":
                quantiles = np.linspace(0, 100, n_bins[jj] + 1)
                if sample_weight is None:
                    bin_edges[jj] = np.asarray(np.percentile(column, quantiles))
                else:
                    bin_edges[jj] = np.asarray(
                        [
                            _weighted_percentile(column, sample_weight, q)
                            for q in quantiles
                        ],
                        dtype=np.float64,
                    )
            elif self.strategy == "kmeans":
                from ..cluster import KMeans  # fixes import loops

                # Deterministic initialization with uniform spacing
                uniform_edges = np.linspace(col_min, col_max, n_bins[jj] + 1)
                init = (uniform_edges[1:] + uniform_edges[:-1])[:, None] * 0.5

                # 1D k-means procedure
                km = KMeans(n_clusters=n_bins[jj], init=init, n_init=1)
                centers = km.fit(
                    column[:, None], sample_weight=sample_weight
                ).cluster_centers_[:, 0]
                # Must sort, centers may be unsorted even with sorted init
                centers.sort()
                bin_edges[jj] = (centers[1:] + centers[:-1]) * 0.5
                bin_edges[jj] = np.r_[col_min, bin_edges[jj], col_max]

            # Remove bins whose width are too small (i.e., <= 1e-8)
            if self.strategy in ("quantile", "kmeans"):
                mask = np.ediff1d(bin_edges[jj], to_begin=np.inf) > 1e-8
                bin_edges[jj] = bin_edges[jj][mask]
                if len(bin_edges[jj]) - 1 != n_bins[jj]:
                    warnings.warn(
                        "Bins whose width are too small (i.e., <= "
                        "1e-8) in feature %d are removed. Consider "
                        "decreasing the number of bins." % jj
                    )
                    n_bins[jj] = len(bin_edges[jj]) - 1

        self.bin_edges_ = bin_edges
        self.n_bins_ = n_bins

        if "onehot" in self.encode:
            self._encoder = OneHotEncoder(
                categories=[np.arange(i) for i in self.n_bins_],
                sparse_output=self.encode == "onehot",
                dtype=output_dtype,
            )
            # Fit the OneHotEncoder with toy datasets
            # so that it's ready for use after the KBinsDiscretizer is fitted
            self._encoder.fit(np.zeros((1, len(self.n_bins_))))

        return self

    def _validate_n_bins(self, n_features):
        """Returns n_bins_, the number of bins per feature."""
        orig_bins = self.n_bins
        if isinstance(orig_bins, Integral):
            return np.full(n_features, orig_bins, dtype=int)

        n_bins = check_array(orig_bins, dtype=int, copy=True, ensure_2d=False)

        if n_bins.ndim > 1 or n_bins.shape[0] != n_features:
            raise ValueError("n_bins must be a scalar or array of shape (n_features,).")

        bad_nbins_value = (n_bins < 2) | (n_bins != orig_bins)

        violating_indices = np.where(bad_nbins_value)[0]
        if violating_indices.shape[0] > 0:
            indices = ", ".join(str(i) for i in violating_indices)
            raise ValueError(
                "{} received an invalid number "
                "of bins at indices {}. Number of bins "
                "must be at least 2, and must be an int.".format(
                    KBinsDiscretizer.__name__, indices
                )
            )
        return n_bins

    def transform(self, X):
        """
        Discretize the data.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Data to be discretized.

        Returns
        -------
        Xt : {ndarray, sparse matrix}, dtype={np.float32, np.float64}
            Data in the binned space. Will be a sparse matrix if
            `self.encode='onehot'` and ndarray otherwise.
        """
        check_is_fitted(self)

        # check input and attribute dtypes
        dtype = (np.float64, np.float32) if self.dtype is None else self.dtype
        Xt = self._validate_data(X, copy=True, dtype=dtype, reset=False)

        bin_edges = self.bin_edges_
        for jj in range(Xt.shape[1]):
            Xt[:, jj] = np.searchsorted(bin_edges[jj][1:-1], Xt[:, jj], side="right")

        if self.encode == "ordinal":
            return Xt

        dtype_init = None
        if "onehot" in self.encode:
            dtype_init = self._encoder.dtype
            self._encoder.dtype = Xt.dtype
        try:
            Xt_enc = self._encoder.transform(Xt)
        finally:
            # revert the initial dtype to avoid modifying self.
            self._encoder.dtype = dtype_init
        return Xt_enc

    def inverse_transform(self, Xt):
        """
        Transform discretized data back to original feature space.

        Note that this function does not regenerate the original data
        due to discretization rounding.

        Parameters
        ----------
        Xt : array-like of shape (n_samples, n_features)
            Transformed data in the binned space.

        Returns
        -------
        Xinv : ndarray, dtype={np.float32, np.float64}
            Data in the original feature space.
        """
        check_is_fitted(self)

        if "onehot" in self.encode:
            Xt = self._encoder.inverse_transform(Xt)

        Xinv = check_array(Xt, copy=True, dtype=(np.float64, np.float32))
        n_features = self.n_bins_.shape[0]
        if Xinv.shape[1] != n_features:
            raise ValueError(
                "Incorrect number of features. Expecting {}, received {}.".format(
                    n_features, Xinv.shape[1]
                )
            )

        for jj in range(n_features):
            bin_edges = self.bin_edges_[jj]
            bin_centers = (bin_edges[1:] + bin_edges[:-1]) * 0.5
            Xinv[:, jj] = bin_centers[(Xinv[:, jj]).astype(np.int64)]

        return Xinv

    def get_feature_names_out(self, input_features=None):
        """Get output feature names.

        Parameters
        ----------
        input_features : array-like of str or None, default=None
            Input features.

            - If `input_features` is `None`, then `feature_names_in_` is
              used as feature names in. If `feature_names_in_` is not defined,
              then the following input feature names are generated:
              `["x0", "x1", ..., "x(n_features_in_ - 1)"]`.
            - If `input_features` is an array-like, then `input_features` must
              match `feature_names_in_` if `feature_names_in_` is defined.

        Returns
        -------
        feature_names_out : ndarray of str objects
            Transformed feature names.
        """
        check_is_fitted(self, "n_features_in_")
        input_features = _check_feature_names_in(self, input_features)
        if hasattr(self, "_encoder"):
            return self._encoder.get_feature_names_out(input_features)

        # ordinal encoding
        return input_features
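To make the binning arithmetic in `fit`/`transform`/`inverse_transform` concrete, here is a standalone NumPy sketch (not the class itself) of the `strategy='uniform'` path, using the first column of the docstring example:

import numpy as np

column = np.array([-2.0, -1.0, 0.0, 1.0])
n_bins = 3

# fit, 'uniform' strategy: equally spaced edges from the column min to max
edges = np.linspace(column.min(), column.max(), n_bins + 1)  # [-2. -1.  0.  1.]

# transform: only interior edges are used, so the outer bins extend to +/-inf
codes = np.searchsorted(edges[1:-1], column, side="right")   # [0 1 2 2]

# inverse_transform: each bin id maps back to the midpoint of its bin's edges
centers = (edges[1:] + edges[:-1]) * 0.5                     # [-1.5 -0.5  0.5]
print(centers[codes])                                        # [-1.5 -0.5  0.5  0.5]

This reproduces `Xt[:, 0]` and `est.inverse_transform(Xt)[:, 0]` from the example in the docstring above.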
env-llmeval/lib/python3.10/site-packages/sklearn/preprocessing/_encoders.py
ADDED
@@ -0,0 +1,1678 @@
1 |
+
# Authors: Andreas Mueller <[email protected]>
|
2 |
+
# Joris Van den Bossche <[email protected]>
|
3 |
+
# License: BSD 3 clause
|
4 |
+
|
5 |
+
import numbers
|
6 |
+
import warnings
|
7 |
+
from numbers import Integral
|
8 |
+
|
9 |
+
import numpy as np
|
10 |
+
from scipy import sparse
|
11 |
+
|
12 |
+
from ..base import BaseEstimator, OneToOneFeatureMixin, TransformerMixin, _fit_context
|
13 |
+
from ..utils import _safe_indexing, check_array, is_scalar_nan
|
14 |
+
from ..utils._encode import _check_unknown, _encode, _get_counts, _unique
|
15 |
+
from ..utils._mask import _get_mask
|
16 |
+
from ..utils._param_validation import Interval, RealNotInt, StrOptions
|
17 |
+
from ..utils._set_output import _get_output_config
|
18 |
+
from ..utils.validation import _check_feature_names_in, check_is_fitted
|
19 |
+
|
20 |
+
__all__ = ["OneHotEncoder", "OrdinalEncoder"]
|
21 |
+
|
22 |
+
|
23 |
+
class _BaseEncoder(TransformerMixin, BaseEstimator):
|
24 |
+
"""
|
25 |
+
Base class for encoders that includes the code to categorize and
|
26 |
+
transform the input features.
|
27 |
+
|
28 |
+
"""
|
29 |
+
|
30 |
+
def _check_X(self, X, force_all_finite=True):
|
31 |
+
"""
|
32 |
+
Perform custom check_array:
|
33 |
+
- convert list of strings to object dtype
|
34 |
+
- check for missing values for object dtype data (check_array does
|
35 |
+
not do that)
|
36 |
+
- return list of features (arrays): this list of features is
|
37 |
+
constructed feature by feature to preserve the data types
|
38 |
+
of pandas DataFrame columns, as otherwise information is lost
|
39 |
+
and cannot be used, e.g. for the `categories_` attribute.
|
40 |
+
|
41 |
+
"""
|
42 |
+
if not (hasattr(X, "iloc") and getattr(X, "ndim", 0) == 2):
|
43 |
+
# if not a dataframe, do normal check_array validation
|
44 |
+
X_temp = check_array(X, dtype=None, force_all_finite=force_all_finite)
|
45 |
+
if not hasattr(X, "dtype") and np.issubdtype(X_temp.dtype, np.str_):
|
46 |
+
X = check_array(X, dtype=object, force_all_finite=force_all_finite)
|
47 |
+
else:
|
48 |
+
X = X_temp
|
49 |
+
needs_validation = False
|
50 |
+
else:
|
51 |
+
# pandas dataframe, do validation later column by column, in order
|
52 |
+
# to keep the dtype information to be used in the encoder.
|
53 |
+
needs_validation = force_all_finite
|
54 |
+
|
55 |
+
n_samples, n_features = X.shape
|
56 |
+
X_columns = []
|
57 |
+
|
58 |
+
for i in range(n_features):
|
59 |
+
Xi = _safe_indexing(X, indices=i, axis=1)
|
60 |
+
Xi = check_array(
|
61 |
+
Xi, ensure_2d=False, dtype=None, force_all_finite=needs_validation
|
62 |
+
)
|
63 |
+
X_columns.append(Xi)
|
64 |
+
|
65 |
+
return X_columns, n_samples, n_features
|
66 |
+
|
67 |
+
def _fit(
|
68 |
+
self,
|
69 |
+
X,
|
70 |
+
handle_unknown="error",
|
71 |
+
force_all_finite=True,
|
72 |
+
return_counts=False,
|
73 |
+
return_and_ignore_missing_for_infrequent=False,
|
74 |
+
):
|
75 |
+
self._check_infrequent_enabled()
|
76 |
+
self._check_n_features(X, reset=True)
|
77 |
+
self._check_feature_names(X, reset=True)
|
78 |
+
X_list, n_samples, n_features = self._check_X(
|
79 |
+
X, force_all_finite=force_all_finite
|
80 |
+
)
|
81 |
+
self.n_features_in_ = n_features
|
82 |
+
|
83 |
+
if self.categories != "auto":
|
84 |
+
if len(self.categories) != n_features:
|
85 |
+
raise ValueError(
|
86 |
+
"Shape mismatch: if categories is an array,"
|
87 |
+
" it has to be of shape (n_features,)."
|
88 |
+
)
|
89 |
+
|
90 |
+
self.categories_ = []
|
91 |
+
category_counts = []
|
92 |
+
compute_counts = return_counts or self._infrequent_enabled
|
93 |
+
|
94 |
+
for i in range(n_features):
|
95 |
+
Xi = X_list[i]
|
96 |
+
|
97 |
+
if self.categories == "auto":
|
98 |
+
result = _unique(Xi, return_counts=compute_counts)
|
99 |
+
if compute_counts:
|
100 |
+
cats, counts = result
|
101 |
+
category_counts.append(counts)
|
102 |
+
else:
|
103 |
+
cats = result
|
104 |
+
else:
|
105 |
+
if np.issubdtype(Xi.dtype, np.str_):
|
106 |
+
# Always convert string categories to objects to avoid
|
107 |
+
# unexpected string truncation for longer category labels
|
108 |
+
# passed in the constructor.
|
109 |
+
Xi_dtype = object
|
110 |
+
else:
|
111 |
+
Xi_dtype = Xi.dtype
|
112 |
+
|
113 |
+
cats = np.array(self.categories[i], dtype=Xi_dtype)
|
114 |
+
if (
|
115 |
+
cats.dtype == object
|
116 |
+
and isinstance(cats[0], bytes)
|
117 |
+
and Xi.dtype.kind != "S"
|
118 |
+
):
|
119 |
+
msg = (
|
120 |
+
f"In column {i}, the predefined categories have type 'bytes'"
|
121 |
+
" which is incompatible with values of type"
|
122 |
+
f" '{type(Xi[0]).__name__}'."
|
123 |
+
)
|
124 |
+
raise ValueError(msg)
|
125 |
+
|
126 |
+
# `nan` must be the last stated category
|
127 |
+
for category in cats[:-1]:
|
128 |
+
if is_scalar_nan(category):
|
129 |
+
raise ValueError(
|
130 |
+
"Nan should be the last element in user"
|
131 |
+
f" provided categories, see categories {cats}"
|
132 |
+
f" in column #{i}"
|
133 |
+
)
|
134 |
+
|
135 |
+
if cats.size != len(_unique(cats)):
|
136 |
+
msg = (
|
137 |
+
f"In column {i}, the predefined categories"
|
138 |
+
" contain duplicate elements."
|
139 |
+
)
|
140 |
+
raise ValueError(msg)
|
141 |
+
|
142 |
+
if Xi.dtype.kind not in "OUS":
|
143 |
+
sorted_cats = np.sort(cats)
|
144 |
+
error_msg = (
|
145 |
+
"Unsorted categories are not supported for numerical categories"
|
146 |
+
)
|
147 |
+
# if there are nans, nan should be the last element
|
148 |
+
stop_idx = -1 if np.isnan(sorted_cats[-1]) else None
|
149 |
+
if np.any(sorted_cats[:stop_idx] != cats[:stop_idx]):
|
150 |
+
raise ValueError(error_msg)
|
151 |
+
|
152 |
+
if handle_unknown == "error":
|
153 |
+
diff = _check_unknown(Xi, cats)
|
154 |
+
if diff:
|
155 |
+
msg = (
|
156 |
+
"Found unknown categories {0} in column {1}"
|
157 |
+
" during fit".format(diff, i)
|
158 |
+
)
|
159 |
+
raise ValueError(msg)
|
160 |
+
if compute_counts:
|
161 |
+
category_counts.append(_get_counts(Xi, cats))
|
162 |
+
|
163 |
+
self.categories_.append(cats)
|
164 |
+
|
165 |
+
output = {"n_samples": n_samples}
|
166 |
+
if return_counts:
|
167 |
+
output["category_counts"] = category_counts
|
168 |
+
|
169 |
+
missing_indices = {}
|
170 |
+
if return_and_ignore_missing_for_infrequent:
|
171 |
+
for feature_idx, categories_for_idx in enumerate(self.categories_):
|
172 |
+
if is_scalar_nan(categories_for_idx[-1]):
|
173 |
+
# `nan` values can only be placed in the latest position
|
174 |
+
missing_indices[feature_idx] = categories_for_idx.size - 1
|
175 |
+
output["missing_indices"] = missing_indices
|
176 |
+
|
177 |
+
if self._infrequent_enabled:
|
178 |
+
self._fit_infrequent_category_mapping(
|
179 |
+
n_samples,
|
180 |
+
category_counts,
|
181 |
+
missing_indices,
|
182 |
+
)
|
183 |
+
return output
|
184 |
+
|
185 |
+
def _transform(
|
186 |
+
self,
|
187 |
+
X,
|
188 |
+
handle_unknown="error",
|
189 |
+
force_all_finite=True,
|
190 |
+
warn_on_unknown=False,
|
191 |
+
ignore_category_indices=None,
|
192 |
+
):
|
193 |
+
X_list, n_samples, n_features = self._check_X(
|
194 |
+
X, force_all_finite=force_all_finite
|
195 |
+
)
|
196 |
+
self._check_feature_names(X, reset=False)
|
197 |
+
self._check_n_features(X, reset=False)
|
198 |
+
|
199 |
+
X_int = np.zeros((n_samples, n_features), dtype=int)
|
200 |
+
X_mask = np.ones((n_samples, n_features), dtype=bool)
|
201 |
+
|
202 |
+
columns_with_unknown = []
|
203 |
+
for i in range(n_features):
|
204 |
+
Xi = X_list[i]
|
205 |
+
diff, valid_mask = _check_unknown(Xi, self.categories_[i], return_mask=True)
|
206 |
+
|
207 |
+
if not np.all(valid_mask):
|
208 |
+
if handle_unknown == "error":
|
209 |
+
msg = (
|
210 |
+
"Found unknown categories {0} in column {1}"
|
211 |
+
" during transform".format(diff, i)
|
212 |
+
)
|
213 |
+
raise ValueError(msg)
|
214 |
+
else:
|
215 |
+
if warn_on_unknown:
|
216 |
+
columns_with_unknown.append(i)
|
217 |
+
# Set the problematic rows to an acceptable value and
|
218 |
+
# continue `The rows are marked `X_mask` and will be
|
219 |
+
# removed later.
|
220 |
+
X_mask[:, i] = valid_mask
|
221 |
+
# cast Xi into the largest string type necessary
|
222 |
+
# to handle different lengths of numpy strings
|
223 |
+
if (
|
224 |
+
self.categories_[i].dtype.kind in ("U", "S")
|
225 |
+
and self.categories_[i].itemsize > Xi.itemsize
|
226 |
+
):
|
227 |
+
Xi = Xi.astype(self.categories_[i].dtype)
|
228 |
+
elif self.categories_[i].dtype.kind == "O" and Xi.dtype.kind == "U":
|
229 |
+
# categories are objects and Xi are numpy strings.
|
230 |
+
# Cast Xi to an object dtype to prevent truncation
|
231 |
+
# when setting invalid values.
|
232 |
+
Xi = Xi.astype("O")
|
233 |
+
else:
|
234 |
+
Xi = Xi.copy()
|
235 |
+
|
236 |
+
Xi[~valid_mask] = self.categories_[i][0]
|
237 |
+
# We use check_unknown=False, since _check_unknown was
|
238 |
+
# already called above.
|
239 |
+
X_int[:, i] = _encode(Xi, uniques=self.categories_[i], check_unknown=False)
|
240 |
+
if columns_with_unknown:
|
241 |
+
warnings.warn(
|
242 |
+
(
|
243 |
+
"Found unknown categories in columns "
|
244 |
+
f"{columns_with_unknown} during transform. These "
|
245 |
+
"unknown categories will be encoded as all zeros"
|
246 |
+
),
|
247 |
+
UserWarning,
|
248 |
+
)
|
249 |
+
|
250 |
+
self._map_infrequent_categories(X_int, X_mask, ignore_category_indices)
|
251 |
+
return X_int, X_mask
|
252 |
+
|
253 |
+
@property
|
254 |
+
def infrequent_categories_(self):
|
255 |
+
"""Infrequent categories for each feature."""
|
256 |
+
# raises an AttributeError if `_infrequent_indices` is not defined
|
257 |
+
infrequent_indices = self._infrequent_indices
|
258 |
+
return [
|
259 |
+
None if indices is None else category[indices]
|
260 |
+
for category, indices in zip(self.categories_, infrequent_indices)
|
261 |
+
]
|
262 |
+
|
263 |
+
def _check_infrequent_enabled(self):
|
264 |
+
"""
|
265 |
+
This functions checks whether _infrequent_enabled is True or False.
|
266 |
+
This has to be called after parameter validation in the fit function.
|
267 |
+
"""
|
268 |
+
max_categories = getattr(self, "max_categories", None)
|
269 |
+
min_frequency = getattr(self, "min_frequency", None)
|
270 |
+
self._infrequent_enabled = (
|
271 |
+
max_categories is not None and max_categories >= 1
|
272 |
+
) or min_frequency is not None
|
273 |
+
|
274 |
+
def _identify_infrequent(self, category_count, n_samples, col_idx):
|
275 |
+
"""Compute the infrequent indices.
|
276 |
+
|
277 |
+
Parameters
|
278 |
+
----------
|
279 |
+
category_count : ndarray of shape (n_cardinality,)
|
280 |
+
Category counts.
|
281 |
+
|
282 |
+
n_samples : int
|
283 |
+
Number of samples.
|
284 |
+
|
285 |
+
col_idx : int
|
286 |
+
Index of the current category. Only used for the error message.
|
287 |
+
|
288 |
+
Returns
|
289 |
+
-------
|
290 |
+
output : ndarray of shape (n_infrequent_categories,) or None
|
291 |
+
If there are infrequent categories, indices of infrequent
|
292 |
+
categories. Otherwise None.
|
293 |
+
"""
|
294 |
+
if isinstance(self.min_frequency, numbers.Integral):
|
295 |
+
infrequent_mask = category_count < self.min_frequency
|
296 |
+
elif isinstance(self.min_frequency, numbers.Real):
|
297 |
+
min_frequency_abs = n_samples * self.min_frequency
|
298 |
+
infrequent_mask = category_count < min_frequency_abs
|
299 |
+
else:
|
300 |
+
infrequent_mask = np.zeros(category_count.shape[0], dtype=bool)
|
301 |
+
|
302 |
+
n_current_features = category_count.size - infrequent_mask.sum() + 1
|
303 |
+
if self.max_categories is not None and self.max_categories < n_current_features:
|
304 |
+
# max_categories includes the one infrequent category
|
305 |
+
frequent_category_count = self.max_categories - 1
|
306 |
+
if frequent_category_count == 0:
|
307 |
+
# All categories are infrequent
|
308 |
+
infrequent_mask[:] = True
|
309 |
+
else:
|
310 |
+
# stable sort to preserve original count order
|
311 |
+
smallest_levels = np.argsort(category_count, kind="mergesort")[
|
312 |
+
:-frequent_category_count
|
313 |
+
]
|
314 |
+
infrequent_mask[smallest_levels] = True
|
315 |
+
|
316 |
+
output = np.flatnonzero(infrequent_mask)
|
317 |
+
return output if output.size > 0 else None
|
318 |
+
|
319 |
+
def _fit_infrequent_category_mapping(
|
320 |
+
self, n_samples, category_counts, missing_indices
|
321 |
+
):
|
322 |
+
"""Fit infrequent categories.
|
323 |
+
|
324 |
+
Defines the private attribute: `_default_to_infrequent_mappings`. For
|
325 |
+
feature `i`, `_default_to_infrequent_mappings[i]` defines the mapping
|
326 |
+
from the integer encoding returned by `super().transform()` into
|
327 |
+
infrequent categories. If `_default_to_infrequent_mappings[i]` is None,
|
328 |
+
there were no infrequent categories in the training set.
|
329 |
+
|
330 |
+
For example if categories 0, 2 and 4 were frequent, while categories
|
331 |
+
1, 3, 5 were infrequent for feature 7, then these categories are mapped
|
332 |
+
to a single output:
|
333 |
+
`_default_to_infrequent_mappings[7] = array([0, 3, 1, 3, 2, 3])`
|
334 |
+
|
335 |
+
Defines private attribute: `_infrequent_indices`. `_infrequent_indices[i]`
|
336 |
+
is an array of indices such that
|
337 |
+
`categories_[i][_infrequent_indices[i]]` are all the infrequent category
|
338 |
+
labels. If the feature `i` has no infrequent categories
|
339 |
+
`_infrequent_indices[i]` is None.
|
340 |
+
|
341 |
+
.. versionadded:: 1.1
|
342 |
+
|
343 |
+
Parameters
|
344 |
+
----------
|
345 |
+
n_samples : int
|
346 |
+
Number of samples in training set.
|
347 |
+
category_counts: list of ndarray
|
348 |
+
`category_counts[i]` is the category counts corresponding to
|
349 |
+
`self.categories_[i]`.
|
350 |
+
missing_indices : dict
|
351 |
+
Dict mapping from feature_idx to category index with a missing value.
|
352 |
+
"""
|
353 |
+
# Remove missing value from counts, so it is not considered as infrequent
|
354 |
+
if missing_indices:
|
355 |
+
category_counts_ = []
|
356 |
+
for feature_idx, count in enumerate(category_counts):
|
357 |
+
if feature_idx in missing_indices:
|
358 |
+
category_counts_.append(
|
359 |
+
np.delete(count, missing_indices[feature_idx])
|
360 |
+
)
|
361 |
+
else:
|
362 |
+
category_counts_.append(count)
|
363 |
+
else:
|
364 |
+
category_counts_ = category_counts
|
365 |
+
|
366 |
+
self._infrequent_indices = [
|
367 |
+
self._identify_infrequent(category_count, n_samples, col_idx)
|
368 |
+
for col_idx, category_count in enumerate(category_counts_)
|
369 |
+
]
|
370 |
+
|
371 |
+
# compute mapping from default mapping to infrequent mapping
|
372 |
+
self._default_to_infrequent_mappings = []
|
373 |
+
|
374 |
+
for feature_idx, infreq_idx in enumerate(self._infrequent_indices):
|
375 |
+
cats = self.categories_[feature_idx]
|
376 |
+
# no infrequent categories
|
377 |
+
if infreq_idx is None:
|
378 |
+
self._default_to_infrequent_mappings.append(None)
|
379 |
+
continue
|
380 |
+
|
381 |
+
n_cats = len(cats)
|
382 |
+
if feature_idx in missing_indices:
|
383 |
+
# Missing index was removed from this category when computing
|
384 |
+
# infrequent indices, thus we need to decrease the number of
|
385 |
+
# total categories when considering the infrequent mapping.
|
386 |
+
n_cats -= 1
|
387 |
+
|
388 |
+
# infrequent indices exist
|
389 |
+
mapping = np.empty(n_cats, dtype=np.int64)
|
390 |
+
n_infrequent_cats = infreq_idx.size
|
391 |
+
|
392 |
+
# infrequent categories are mapped to the last element.
|
393 |
+
n_frequent_cats = n_cats - n_infrequent_cats
|
394 |
+
mapping[infreq_idx] = n_frequent_cats
|
395 |
+
|
396 |
+
frequent_indices = np.setdiff1d(np.arange(n_cats), infreq_idx)
|
397 |
+
mapping[frequent_indices] = np.arange(n_frequent_cats)
|
398 |
+
|
399 |
+
self._default_to_infrequent_mappings.append(mapping)
|
400 |
+
|
401 |
+
def _map_infrequent_categories(self, X_int, X_mask, ignore_category_indices):
|
402 |
+
"""Map infrequent categories to integer representing the infrequent category.
|
403 |
+
|
404 |
+
This modifies X_int in-place. Values that were invalid based on `X_mask`
|
405 |
+
are mapped to the infrequent category if there was an infrequent
|
406 |
+
category for that feature.
|
407 |
+
|
408 |
+
Parameters
|
409 |
+
----------
|
410 |
+
X_int: ndarray of shape (n_samples, n_features)
|
411 |
+
Integer encoded categories.
|
412 |
+
|
413 |
+
X_mask: ndarray of shape (n_samples, n_features)
|
414 |
+
Bool mask for valid values in `X_int`.
|
415 |
+
|
416 |
+
ignore_category_indices : dict
|
417 |
+
Dictionary mapping from feature_idx to category index to ignore.
|
418 |
+
Ignored indexes will not be grouped and the original ordinal encoding
|
419 |
+
will remain.
|
420 |
+
"""
|
421 |
+
if not self._infrequent_enabled:
|
422 |
+
return
|
423 |
+
|
424 |
+
ignore_category_indices = ignore_category_indices or {}
|
425 |
+
|
426 |
+
for col_idx in range(X_int.shape[1]):
|
427 |
+
infrequent_idx = self._infrequent_indices[col_idx]
|
428 |
+
if infrequent_idx is None:
|
429 |
+
continue
|
430 |
+
|
431 |
+
X_int[~X_mask[:, col_idx], col_idx] = infrequent_idx[0]
|
432 |
+
if self.handle_unknown == "infrequent_if_exist":
|
433 |
+
# All the unknown values are now mapped to the
|
434 |
+
# infrequent_idx[0], which makes the unknown values valid
|
435 |
+
# This is needed in `transform` when the encoding is formed
|
436 |
+
# using `X_mask`.
|
437 |
+
X_mask[:, col_idx] = True
|
438 |
+
|
439 |
+
# Remaps encoding in `X_int` where the infrequent categories are
|
440 |
+
# grouped together.
|
441 |
+
for i, mapping in enumerate(self._default_to_infrequent_mappings):
|
442 |
+
if mapping is None:
|
443 |
+
continue
|
444 |
+
|
445 |
+
if i in ignore_category_indices:
|
446 |
+
# Update rows that are **not** ignored
|
447 |
+
rows_to_update = X_int[:, i] != ignore_category_indices[i]
|
448 |
+
else:
|
449 |
+
rows_to_update = slice(None)
|
450 |
+
|
451 |
+
X_int[rows_to_update, i] = np.take(mapping, X_int[rows_to_update, i])
|
452 |
+
|
453 |
+
def _more_tags(self):
|
454 |
+
return {"X_types": ["2darray", "categorical"], "allow_nan": True}
|
455 |
+
|
456 |
+
|
457 |
+
class OneHotEncoder(_BaseEncoder):
|
458 |
+
"""
|
459 |
+
Encode categorical features as a one-hot numeric array.
|
460 |
+
|
461 |
+
The input to this transformer should be an array-like of integers or
|
462 |
+
    strings, denoting the values taken on by categorical (discrete) features.
    The features are encoded using a one-hot (aka 'one-of-K' or 'dummy')
    encoding scheme. This creates a binary column for each category and
    returns a sparse matrix or dense array (depending on the ``sparse_output``
    parameter).

    By default, the encoder derives the categories based on the unique values
    in each feature. Alternatively, you can also specify the `categories`
    manually.

    This encoding is needed for feeding categorical data to many scikit-learn
    estimators, notably linear models and SVMs with the standard kernels.

    Note: a one-hot encoding of y labels should use a LabelBinarizer
    instead.

    Read more in the :ref:`User Guide <preprocessing_categorical_features>`.
    For a comparison of different encoders, refer to:
    :ref:`sphx_glr_auto_examples_preprocessing_plot_target_encoder.py`.

    Parameters
    ----------
    categories : 'auto' or a list of array-like, default='auto'
        Categories (unique values) per feature:

        - 'auto' : Determine categories automatically from the training data.
        - list : ``categories[i]`` holds the categories expected in the ith
          column. The passed categories should not mix strings and numeric
          values within a single feature, and should be sorted in case of
          numeric values.

        The used categories can be found in the ``categories_`` attribute.

        .. versionadded:: 0.20

    drop : {'first', 'if_binary'} or an array-like of shape (n_features,), \
            default=None
        Specifies a methodology to use to drop one of the categories per
        feature. This is useful in situations where perfectly collinear
        features cause problems, such as when feeding the resulting data
        into an unregularized linear regression model.

        However, dropping one category breaks the symmetry of the original
        representation and can therefore induce a bias in downstream models,
        for instance for penalized linear classification or regression models.

        - None : retain all features (the default).
        - 'first' : drop the first category in each feature. If only one
          category is present, the feature will be dropped entirely.
        - 'if_binary' : drop the first category in each feature with two
          categories. Features with 1 or more than 2 categories are
          left intact.
        - array : ``drop[i]`` is the category in feature ``X[:, i]`` that
          should be dropped.

        When `max_categories` or `min_frequency` is configured to group
        infrequent categories, the dropping behavior is handled after the
        grouping.

        .. versionadded:: 0.21
           The parameter `drop` was added in 0.21.

        .. versionchanged:: 0.23
           The option `drop='if_binary'` was added in 0.23.

        .. versionchanged:: 1.1
           Support for dropping infrequent categories.

    sparse_output : bool, default=True
        When ``True``, it returns a :class:`scipy.sparse.csr_matrix`,
        i.e. a sparse matrix in "Compressed Sparse Row" (CSR) format.

        .. versionadded:: 1.2
           `sparse` was renamed to `sparse_output`.

    dtype : number type, default=np.float64
        Desired dtype of output.

    handle_unknown : {'error', 'ignore', 'infrequent_if_exist'}, \
            default='error'
        Specifies the way unknown categories are handled during :meth:`transform`.

        - 'error' : Raise an error if an unknown category is present during transform.
        - 'ignore' : When an unknown category is encountered during
          transform, the resulting one-hot encoded columns for this feature
          will be all zeros. In the inverse transform, an unknown category
          will be denoted as None.
        - 'infrequent_if_exist' : When an unknown category is encountered
          during transform, the resulting one-hot encoded columns for this
          feature will map to the infrequent category if it exists. The
          infrequent category will be mapped to the last position in the
          encoding. During inverse transform, an unknown category will be
          mapped to the category denoted `'infrequent'` if it exists. If the
          `'infrequent'` category does not exist, then :meth:`transform` and
          :meth:`inverse_transform` will handle an unknown category as with
          `handle_unknown='ignore'`. Infrequent categories exist based on
          `min_frequency` and `max_categories`. Read more in the
          :ref:`User Guide <encoder_infrequent_categories>`.

        .. versionchanged:: 1.1
            `'infrequent_if_exist'` was added to automatically handle unknown
            categories and infrequent categories.

    min_frequency : int or float, default=None
        Specifies the minimum frequency below which a category will be
        considered infrequent.

        - If `int`, categories with a smaller cardinality will be considered
          infrequent.

        - If `float`, categories with a smaller cardinality than
          `min_frequency * n_samples` will be considered infrequent.

        .. versionadded:: 1.1
            Read more in the :ref:`User Guide <encoder_infrequent_categories>`.

    max_categories : int, default=None
        Specifies an upper limit to the number of output features for each input
        feature when considering infrequent categories. If there are infrequent
        categories, `max_categories` includes the category representing the
        infrequent categories along with the frequent categories. If `None`,
        there is no limit to the number of output features.

        .. versionadded:: 1.1
            Read more in the :ref:`User Guide <encoder_infrequent_categories>`.

    feature_name_combiner : "concat" or callable, default="concat"
        Callable with signature `def callable(input_feature, category)` that returns a
        string. This is used to create feature names to be returned by
        :meth:`get_feature_names_out`.

        `"concat"` concatenates encoded feature name and category with
        `feature + "_" + str(category)`. E.g. feature X with values 1, 6, 7 creates
        feature names `X_1, X_6, X_7`.

        .. versionadded:: 1.3

    Attributes
    ----------
    categories_ : list of arrays
        The categories of each feature determined during fitting
        (in order of the features in X and corresponding with the output
        of ``transform``). This includes the category specified in ``drop``
        (if any).

    drop_idx_ : array of shape (n_features,)
        - ``drop_idx_[i]`` is the index in ``categories_[i]`` of the category
          to be dropped for each feature.
        - ``drop_idx_[i] = None`` if no category is to be dropped from the
          feature with index ``i``, e.g. when `drop='if_binary'` and the
          feature isn't binary.
        - ``drop_idx_ = None`` if all the transformed features will be
          retained.

        If infrequent categories are enabled by setting `min_frequency` or
        `max_categories` to a non-default value and `drop_idx_[i]` corresponds
        to an infrequent category, then the entire infrequent category is
        dropped.

        .. versionchanged:: 0.23
           Added the possibility to contain `None` values.

    infrequent_categories_ : list of ndarray
        Defined only if infrequent categories are enabled by setting
        `min_frequency` or `max_categories` to a non-default value.
        `infrequent_categories_[i]` are the infrequent categories for feature
        `i`. If the feature `i` has no infrequent categories
        `infrequent_categories_[i]` is None.

        .. versionadded:: 1.1

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 1.0

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    feature_name_combiner : callable or None
        Callable with signature `def callable(input_feature, category)` that returns a
        string. This is used to create feature names to be returned by
        :meth:`get_feature_names_out`.

        .. versionadded:: 1.3

    See Also
    --------
    OrdinalEncoder : Performs an ordinal (integer)
        encoding of the categorical features.
    TargetEncoder : Encodes categorical features using the target.
    sklearn.feature_extraction.DictVectorizer : Performs a one-hot encoding of
        dictionary items (also handles string-valued features).
    sklearn.feature_extraction.FeatureHasher : Performs an approximate one-hot
        encoding of dictionary items or strings.
    LabelBinarizer : Binarizes labels in a one-vs-all
        fashion.
    MultiLabelBinarizer : Transforms between iterable of
        iterables and a multilabel format, e.g. a (samples x classes) binary
        matrix indicating the presence of a class label.

    Examples
    --------
    Given a dataset with two features, we let the encoder find the unique
    values per feature and transform the data to a binary one-hot encoding.

    >>> from sklearn.preprocessing import OneHotEncoder

    One can discard categories not seen during `fit`:

    >>> enc = OneHotEncoder(handle_unknown='ignore')
    >>> X = [['Male', 1], ['Female', 3], ['Female', 2]]
    >>> enc.fit(X)
    OneHotEncoder(handle_unknown='ignore')
    >>> enc.categories_
    [array(['Female', 'Male'], dtype=object), array([1, 2, 3], dtype=object)]
    >>> enc.transform([['Female', 1], ['Male', 4]]).toarray()
    array([[1., 0., 1., 0., 0.],
           [0., 1., 0., 0., 0.]])
    >>> enc.inverse_transform([[0, 1, 1, 0, 0], [0, 0, 0, 1, 0]])
    array([['Male', 1],
           [None, 2]], dtype=object)
    >>> enc.get_feature_names_out(['gender', 'group'])
    array(['gender_Female', 'gender_Male', 'group_1', 'group_2', 'group_3'], ...)

    One can always drop the first column for each feature:

    >>> drop_enc = OneHotEncoder(drop='first').fit(X)
    >>> drop_enc.categories_
    [array(['Female', 'Male'], dtype=object), array([1, 2, 3], dtype=object)]
    >>> drop_enc.transform([['Female', 1], ['Male', 2]]).toarray()
    array([[0., 0., 0.],
           [1., 1., 0.]])

    Or drop a column only for features having 2 categories:

    >>> drop_binary_enc = OneHotEncoder(drop='if_binary').fit(X)
    >>> drop_binary_enc.transform([['Female', 1], ['Male', 2]]).toarray()
    array([[0., 1., 0., 0.],
           [1., 0., 1., 0.]])

    One can change the way feature names are created.

    >>> def custom_combiner(feature, category):
    ...     return str(feature) + "_" + type(category).__name__ + "_" + str(category)
    >>> custom_fnames_enc = OneHotEncoder(feature_name_combiner=custom_combiner).fit(X)
    >>> custom_fnames_enc.get_feature_names_out()
    array(['x0_str_Female', 'x0_str_Male', 'x1_int_1', 'x1_int_2', 'x1_int_3'],
          dtype=object)

    Infrequent categories are enabled by setting `max_categories` or `min_frequency`.

    >>> import numpy as np
    >>> X = np.array([["a"] * 5 + ["b"] * 20 + ["c"] * 10 + ["d"] * 3], dtype=object).T
    >>> ohe = OneHotEncoder(max_categories=3, sparse_output=False).fit(X)
    >>> ohe.infrequent_categories_
    [array(['a', 'd'], dtype=object)]
    >>> ohe.transform([["a"], ["b"]])
    array([[0., 0., 1.],
           [1., 0., 0.]])
    """

    _parameter_constraints: dict = {
        "categories": [StrOptions({"auto"}), list],
        "drop": [StrOptions({"first", "if_binary"}), "array-like", None],
        "dtype": "no_validation",  # validation delegated to numpy
        "handle_unknown": [StrOptions({"error", "ignore", "infrequent_if_exist"})],
        "max_categories": [Interval(Integral, 1, None, closed="left"), None],
        "min_frequency": [
            Interval(Integral, 1, None, closed="left"),
            Interval(RealNotInt, 0, 1, closed="neither"),
            None,
        ],
        "sparse_output": ["boolean"],
        "feature_name_combiner": [StrOptions({"concat"}), callable],
    }

    def __init__(
        self,
        *,
        categories="auto",
        drop=None,
        sparse_output=True,
        dtype=np.float64,
        handle_unknown="error",
        min_frequency=None,
        max_categories=None,
        feature_name_combiner="concat",
    ):
        self.categories = categories
        self.sparse_output = sparse_output
        self.dtype = dtype
        self.handle_unknown = handle_unknown
        self.drop = drop
        self.min_frequency = min_frequency
        self.max_categories = max_categories
        self.feature_name_combiner = feature_name_combiner

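    # For example, with categories ['a', 'b', 'c', 'd'] where 'a' and 'd' are
    # infrequent, `_default_to_infrequent_mappings` maps the original indices
    # [0, 1, 2, 3] to [2, 0, 1, 2]: frequent categories keep their relative
    # order and all infrequent categories share the last (grouped) position.
    # The helper below translates a user-requested drop index through that
    # mapping.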
    def _map_drop_idx_to_infrequent(self, feature_idx, drop_idx):
        """Convert `drop_idx` into the index for infrequent categories.

        If there are no infrequent categories, then `drop_idx` is
        returned. This method is called in `_set_drop_idx` when the `drop`
        parameter is an array-like.
        """
        if not self._infrequent_enabled:
            return drop_idx

        default_to_infrequent = self._default_to_infrequent_mappings[feature_idx]
        if default_to_infrequent is None:
            return drop_idx

        # Raise error when explicitly dropping a category that is infrequent
        infrequent_indices = self._infrequent_indices[feature_idx]
        if infrequent_indices is not None and drop_idx in infrequent_indices:
            categories = self.categories_[feature_idx]
            raise ValueError(
                f"Unable to drop category {categories[drop_idx].item()!r} from"
                f" feature {feature_idx} because it is infrequent"
            )
        return default_to_infrequent[drop_idx]

    def _set_drop_idx(self):
        """Compute the drop indices associated with `self.categories_`.

        If `self.drop` is:
        - `None`, No categories have been dropped.
        - `'first'`, All zeros to drop the first category.
        - `'if_binary'`, All zeros if the category is binary and `None`
          otherwise.
        - array-like, The indices of the categories that match the
          categories in `self.drop`. If the dropped category is an infrequent
          category, then the index for the infrequent category is used. This
          means that the entire infrequent category is dropped.

        This method defines a public `drop_idx_` and a private
        `_drop_idx_after_grouping`.

        - `drop_idx_`: Public facing API that references the drop category in
          `self.categories_`.
        - `_drop_idx_after_grouping`: Used internally to drop categories *after* the
          infrequent categories are grouped together.

        If there are no infrequent categories or drop is `None`, then
        `drop_idx_=_drop_idx_after_grouping`.
        """
        if self.drop is None:
            drop_idx_after_grouping = None
        elif isinstance(self.drop, str):
            if self.drop == "first":
                drop_idx_after_grouping = np.zeros(len(self.categories_), dtype=object)
            elif self.drop == "if_binary":
                n_features_out_no_drop = [len(cat) for cat in self.categories_]
                if self._infrequent_enabled:
                    for i, infreq_idx in enumerate(self._infrequent_indices):
                        if infreq_idx is None:
                            continue
                        n_features_out_no_drop[i] -= infreq_idx.size - 1

                drop_idx_after_grouping = np.array(
                    [
                        0 if n_features_out == 2 else None
                        for n_features_out in n_features_out_no_drop
                    ],
                    dtype=object,
                )

        else:
            drop_array = np.asarray(self.drop, dtype=object)
            droplen = len(drop_array)

            if droplen != len(self.categories_):
                msg = (
                    "`drop` should have length equal to the number "
                    "of features ({}), got {}"
                )
                raise ValueError(msg.format(len(self.categories_), droplen))
            missing_drops = []
            drop_indices = []
            for feature_idx, (drop_val, cat_list) in enumerate(
                zip(drop_array, self.categories_)
            ):
                if not is_scalar_nan(drop_val):
                    drop_idx = np.where(cat_list == drop_val)[0]
                    if drop_idx.size:  # found drop idx
                        drop_indices.append(
                            self._map_drop_idx_to_infrequent(feature_idx, drop_idx[0])
                        )
                    else:
                        missing_drops.append((feature_idx, drop_val))
                    continue

                # drop_val is nan, find nan in categories manually
                if is_scalar_nan(cat_list[-1]):
                    drop_indices.append(
                        self._map_drop_idx_to_infrequent(feature_idx, cat_list.size - 1)
                    )
                else:  # nan is missing
                    missing_drops.append((feature_idx, drop_val))

            if any(missing_drops):
                msg = (
                    "The following categories were supposed to be "
                    "dropped, but were not found in the training "
                    "data.\n{}".format(
                        "\n".join(
                            [
                                # missing_drops stores (feature_idx, drop_val)
                                "Category: {}, Feature: {}".format(drop_val, f_idx)
                                for f_idx, drop_val in missing_drops
                            ]
                        )
                    )
                )
                raise ValueError(msg)
            drop_idx_after_grouping = np.array(drop_indices, dtype=object)

        # `_drop_idx_after_grouping` are the categories to drop *after* the infrequent
        # categories are grouped together. If needed, we remap `drop_idx` back
        # to the categories seen in `self.categories_`.
        self._drop_idx_after_grouping = drop_idx_after_grouping

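        # For example, with categories ['a', 'b', 'c', 'd'], 'a'/'d' infrequent
        # and a grouped drop index of 2, the loop below inverts
        # `_default_to_infrequent_mappings` ([2, 0, 1, 2] -> first match at
        # position 0, i.e. 'a'), so that the public `drop_idx_` keeps pointing
        # into `self.categories_`.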
        if not self._infrequent_enabled or drop_idx_after_grouping is None:
            self.drop_idx_ = self._drop_idx_after_grouping
        else:
            drop_idx_ = []
            for feature_idx, drop_idx in enumerate(drop_idx_after_grouping):
                default_to_infrequent = self._default_to_infrequent_mappings[
                    feature_idx
                ]
                if drop_idx is None or default_to_infrequent is None:
                    orig_drop_idx = drop_idx
                else:
                    orig_drop_idx = np.flatnonzero(default_to_infrequent == drop_idx)[0]

                drop_idx_.append(orig_drop_idx)

            self.drop_idx_ = np.asarray(drop_idx_, dtype=object)

    def _compute_transformed_categories(self, i, remove_dropped=True):
        """Compute the transformed categories used for column `i`.

        1. If there are infrequent categories, the category is named
           'infrequent_sklearn'.
        2. Dropped columns are removed when remove_dropped=True.
        """
        cats = self.categories_[i]

        if self._infrequent_enabled:
            infreq_map = self._default_to_infrequent_mappings[i]
            if infreq_map is not None:
                frequent_mask = infreq_map < infreq_map.max()
                infrequent_cat = "infrequent_sklearn"
                # infrequent category is always at the end
                cats = np.concatenate(
                    (cats[frequent_mask], np.array([infrequent_cat], dtype=object))
                )

        if remove_dropped:
            cats = self._remove_dropped_categories(cats, i)
        return cats

    def _remove_dropped_categories(self, categories, i):
        """Remove dropped categories."""
        if (
            self._drop_idx_after_grouping is not None
            and self._drop_idx_after_grouping[i] is not None
        ):
            return np.delete(categories, self._drop_idx_after_grouping[i])
        return categories

    def _compute_n_features_outs(self):
        """Compute the n_features_out for each input feature."""
        output = [len(cats) for cats in self.categories_]

        if self._drop_idx_after_grouping is not None:
            for i, drop_idx in enumerate(self._drop_idx_after_grouping):
                if drop_idx is not None:
                    output[i] -= 1

        if not self._infrequent_enabled:
            return output

        # infrequent is enabled, the number of features out is reduced
        # because the infrequent categories are grouped together
        for i, infreq_idx in enumerate(self._infrequent_indices):
            if infreq_idx is None:
                continue
            output[i] -= infreq_idx.size - 1

        return output

    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, X, y=None):
        """
        Fit OneHotEncoder to X.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            The data to determine the categories of each feature.

        y : None
            Ignored. This parameter exists only for compatibility with
            :class:`~sklearn.pipeline.Pipeline`.

        Returns
        -------
        self
            Fitted encoder.
        """
        self._fit(
            X,
            handle_unknown=self.handle_unknown,
            force_all_finite="allow-nan",
        )
        self._set_drop_idx()
        self._n_features_outs = self._compute_n_features_outs()
        return self

    def transform(self, X):
        """
        Transform X using one-hot encoding.

        If `sparse_output=True` (default), it returns an instance of
        :class:`scipy.sparse._csr.csr_matrix` (CSR format).

        If there are infrequent categories for a feature, set by specifying
        `max_categories` or `min_frequency`, the infrequent categories are
        grouped into a single category.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            The data to encode.

        Returns
        -------
        X_out : {ndarray, sparse matrix} of shape \
                (n_samples, n_encoded_features)
            Transformed input. If `sparse_output=True`, a sparse matrix will be
            returned.
        """
        check_is_fitted(self)
        transform_output = _get_output_config("transform", estimator=self)["dense"]
        if transform_output != "default" and self.sparse_output:
            capitalize_transform_output = transform_output.capitalize()
            raise ValueError(
                f"{capitalize_transform_output} output does not support sparse data."
                f" Set sparse_output=False to output {transform_output} dataframes or"
                f" disable {capitalize_transform_output} output via"
                ' `ohe.set_output(transform="default")`.'
            )

        # validation of X happens in _check_X called by _transform
        warn_on_unknown = self.drop is not None and self.handle_unknown in {
            "ignore",
            "infrequent_if_exist",
        }
        X_int, X_mask = self._transform(
            X,
            handle_unknown=self.handle_unknown,
            force_all_finite="allow-nan",
            warn_on_unknown=warn_on_unknown,
        )

        n_samples, n_features = X_int.shape

        if self._drop_idx_after_grouping is not None:
            to_drop = self._drop_idx_after_grouping.copy()
            # We remove all the dropped categories from mask, and decrement all
            # categories that occur after them to avoid an empty column.
            keep_cells = X_int != to_drop
            for i, cats in enumerate(self.categories_):
                # drop='if_binary' but feature isn't binary
                if to_drop[i] is None:
                    # set to cardinality to not drop from X_int
                    to_drop[i] = len(cats)

            to_drop = to_drop.reshape(1, -1)
            X_int[X_int > to_drop] -= 1
            X_mask &= keep_cells

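        # The encoded matrix is assembled directly in CSR form: each row of
        # X_int holds one category code per feature, `feature_indices` offsets
        # those per-feature codes into global output columns, `mask` discards
        # the cells of unknown/dropped categories, and `indptr` counts the
        # kept cells per sample. For instance, a sample with codes [1, 0] for
        # features with 2 and 3 output columns yields column indices [1, 2].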
        mask = X_mask.ravel()
        feature_indices = np.cumsum([0] + self._n_features_outs)
        indices = (X_int + feature_indices[:-1]).ravel()[mask]

        indptr = np.empty(n_samples + 1, dtype=int)
        indptr[0] = 0
        np.sum(X_mask, axis=1, out=indptr[1:], dtype=indptr.dtype)
        np.cumsum(indptr[1:], out=indptr[1:])
        data = np.ones(indptr[-1])

        out = sparse.csr_matrix(
            (data, indices, indptr),
            shape=(n_samples, feature_indices[-1]),
            dtype=self.dtype,
        )
        if not self.sparse_output:
            return out.toarray()
        else:
            return out

    def inverse_transform(self, X):
        """
        Convert the data back to the original representation.

        When unknown categories are encountered (all zeros in the
        one-hot encoding), ``None`` is used to represent this category. If the
        feature with the unknown category has a dropped category, the dropped
        category will be its inverse.

        For a given input feature, if there is an infrequent category,
        'infrequent_sklearn' will be used to represent the infrequent category.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape \
                (n_samples, n_encoded_features)
            The transformed data.

        Returns
        -------
        X_tr : ndarray of shape (n_samples, n_features)
            Inverse transformed array.
        """
        check_is_fitted(self)
        X = check_array(X, accept_sparse="csr")

        n_samples, _ = X.shape
        n_features = len(self.categories_)

        n_features_out = np.sum(self._n_features_outs)

        # validate shape of passed X
        msg = (
            "Shape of the passed X data is not correct. Expected {0} columns, got {1}."
        )
        if X.shape[1] != n_features_out:
            raise ValueError(msg.format(n_features_out, X.shape[1]))

        transformed_features = [
            self._compute_transformed_categories(i, remove_dropped=False)
            for i, _ in enumerate(self.categories_)
        ]

        # create resulting array of appropriate dtype
        dt = np.result_type(*[cat.dtype for cat in transformed_features])
        X_tr = np.empty((n_samples, n_features), dtype=dt)

        j = 0
        found_unknown = {}

        if self._infrequent_enabled:
            infrequent_indices = self._infrequent_indices
        else:
            infrequent_indices = [None] * n_features

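        # Each feature is decoded from its own block of one-hot columns:
        # argmax recovers the category index, while an all-zero block means
        # the value was either an ignored unknown category or the dropped
        # category, which the branches below disambiguate.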
        for i in range(n_features):
            cats_wo_dropped = self._remove_dropped_categories(
                transformed_features[i], i
            )
            n_categories = cats_wo_dropped.shape[0]

            # Only happens if there was a column with a unique
            # category. In this case we just fill the column with this
            # unique category value.
            if n_categories == 0:
                X_tr[:, i] = self.categories_[i][self._drop_idx_after_grouping[i]]
                j += n_categories
                continue
            sub = X[:, j : j + n_categories]
            # for sparse X argmax returns 2D matrix, ensure 1D array
            labels = np.asarray(sub.argmax(axis=1)).flatten()
            X_tr[:, i] = cats_wo_dropped[labels]

            if self.handle_unknown == "ignore" or (
                self.handle_unknown == "infrequent_if_exist"
                and infrequent_indices[i] is None
            ):
                unknown = np.asarray(sub.sum(axis=1) == 0).flatten()
                # ignored unknown categories: we have a row of all zero
                if unknown.any():
                    # if categories were dropped then unknown categories will
                    # be mapped to the dropped category
                    if (
                        self._drop_idx_after_grouping is None
                        or self._drop_idx_after_grouping[i] is None
                    ):
                        found_unknown[i] = unknown
                    else:
                        X_tr[unknown, i] = self.categories_[i][
                            self._drop_idx_after_grouping[i]
                        ]
            else:
                dropped = np.asarray(sub.sum(axis=1) == 0).flatten()
                if dropped.any():
                    if self._drop_idx_after_grouping is None:
                        all_zero_samples = np.flatnonzero(dropped)
                        raise ValueError(
                            f"Samples {all_zero_samples} can not be inverted "
                            "when drop=None and handle_unknown='error' "
                            "because they contain all zeros"
                        )
                    # we can safely assume that all of the nulls in each column
                    # are the dropped value
                    drop_idx = self._drop_idx_after_grouping[i]
                    X_tr[dropped, i] = transformed_features[i][drop_idx]

            j += n_categories

        # if ignored are found: potentially need to upcast result to
        # insert None values
        if found_unknown:
            if X_tr.dtype != object:
                X_tr = X_tr.astype(object)

            for idx, mask in found_unknown.items():
                X_tr[mask, idx] = None

        return X_tr

    def get_feature_names_out(self, input_features=None):
        """Get output feature names for transformation.

        Parameters
        ----------
        input_features : array-like of str or None, default=None
            Input features.

            - If `input_features` is `None`, then `feature_names_in_` is
              used as feature names in. If `feature_names_in_` is not defined,
              then the following input feature names are generated:
              `["x0", "x1", ..., "x(n_features_in_ - 1)"]`.
            - If `input_features` is an array-like, then `input_features` must
              match `feature_names_in_` if `feature_names_in_` is defined.

        Returns
        -------
        feature_names_out : ndarray of str objects
            Transformed feature names.
        """
        check_is_fitted(self)
        input_features = _check_feature_names_in(self, input_features)
        cats = [
            self._compute_transformed_categories(i)
            for i, _ in enumerate(self.categories_)
        ]

        name_combiner = self._check_get_feature_name_combiner()
        feature_names = []
        for i in range(len(cats)):
            names = [name_combiner(input_features[i], t) for t in cats[i]]
            feature_names.extend(names)

        return np.array(feature_names, dtype=object)

    def _check_get_feature_name_combiner(self):
        if self.feature_name_combiner == "concat":
            return lambda feature, category: feature + "_" + str(category)
        else:  # callable
            dry_run_combiner = self.feature_name_combiner("feature", "category")
            if not isinstance(dry_run_combiner, str):
                raise TypeError(
                    "When `feature_name_combiner` is a callable, it should return a "
                    f"Python string. Got {type(dry_run_combiner)} instead."
                )
            return self.feature_name_combiner


class OrdinalEncoder(OneToOneFeatureMixin, _BaseEncoder):
    """
    Encode categorical features as an integer array.

    The input to this transformer should be an array-like of integers or
    strings, denoting the values taken on by categorical (discrete) features.
    The features are converted to ordinal integers. This results in
    a single column of integers (0 to n_categories - 1) per feature.

    Read more in the :ref:`User Guide <preprocessing_categorical_features>`.
    For a comparison of different encoders, refer to:
    :ref:`sphx_glr_auto_examples_preprocessing_plot_target_encoder.py`.

    .. versionadded:: 0.20

    Parameters
    ----------
    categories : 'auto' or a list of array-like, default='auto'
        Categories (unique values) per feature:

        - 'auto' : Determine categories automatically from the training data.
        - list : ``categories[i]`` holds the categories expected in the ith
          column. The passed categories should not mix strings and numeric
          values, and should be sorted in case of numeric values.

        The used categories can be found in the ``categories_`` attribute.

    dtype : number type, default=np.float64
        Desired dtype of output.

    handle_unknown : {'error', 'use_encoded_value'}, default='error'
        When set to 'error' an error will be raised in case an unknown
        categorical feature is present during transform. When set to
        'use_encoded_value', the encoded value of unknown categories will be
        set to the value given for the parameter `unknown_value`. In
        :meth:`inverse_transform`, an unknown category will be denoted as None.

        .. versionadded:: 0.24

    unknown_value : int or np.nan, default=None
        When the parameter handle_unknown is set to 'use_encoded_value', this
        parameter is required and will set the encoded value of unknown
        categories. It has to be distinct from the values used to encode any of
        the categories in `fit`. If set to np.nan, the `dtype` parameter must
        be a float dtype.

        .. versionadded:: 0.24

    encoded_missing_value : int or np.nan, default=np.nan
        Encoded value of missing categories. If set to `np.nan`, then the `dtype`
        parameter must be a float dtype.

        .. versionadded:: 1.1

    min_frequency : int or float, default=None
        Specifies the minimum frequency below which a category will be
        considered infrequent.

        - If `int`, categories with a smaller cardinality will be considered
          infrequent.

        - If `float`, categories with a smaller cardinality than
          `min_frequency * n_samples` will be considered infrequent.

        .. versionadded:: 1.3
            Read more in the :ref:`User Guide <encoder_infrequent_categories>`.

    max_categories : int, default=None
        Specifies an upper limit to the number of output categories for each input
        feature when considering infrequent categories. If there are infrequent
        categories, `max_categories` includes the category representing the
        infrequent categories along with the frequent categories. If `None`,
        there is no limit to the number of output features.

        `max_categories` does **not** take into account missing or unknown
        categories. Setting `unknown_value` or `encoded_missing_value` to an
        integer will increase the number of unique integer codes by one each.
        This can result in up to `max_categories + 2` integer codes.

        .. versionadded:: 1.3
            Read more in the :ref:`User Guide <encoder_infrequent_categories>`.

    Attributes
    ----------
    categories_ : list of arrays
        The categories of each feature determined during ``fit`` (in order of
        the features in X and corresponding with the output of ``transform``).
        This does not include categories that weren't seen during ``fit``.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 1.0

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    infrequent_categories_ : list of ndarray
        Defined only if infrequent categories are enabled by setting
        `min_frequency` or `max_categories` to a non-default value.
        `infrequent_categories_[i]` are the infrequent categories for feature
        `i`. If the feature `i` has no infrequent categories
        `infrequent_categories_[i]` is None.

        .. versionadded:: 1.3

    See Also
    --------
    OneHotEncoder : Performs a one-hot encoding of categorical features. This encoding
        is suitable for low to medium cardinality categorical variables, both in
        supervised and unsupervised settings.
    TargetEncoder : Encodes categorical features using supervised signal
        in a classification or regression pipeline. This encoding is typically
        suitable for high cardinality categorical variables.
    LabelEncoder : Encodes target labels with values between 0 and
        ``n_classes-1``.

    Notes
    -----
    With a high proportion of `nan` values, inferring categories becomes slow with
    Python versions before 3.10. The handling of `nan` values was improved
    from Python 3.10 onwards (cf.
    `bpo-43475 <https://github.com/python/cpython/issues/87641>`_).

    Examples
    --------
    Given a dataset with two features, we let the encoder find the unique
    values per feature and transform the data to an ordinal encoding.

    >>> from sklearn.preprocessing import OrdinalEncoder
    >>> enc = OrdinalEncoder()
    >>> X = [['Male', 1], ['Female', 3], ['Female', 2]]
    >>> enc.fit(X)
    OrdinalEncoder()
    >>> enc.categories_
    [array(['Female', 'Male'], dtype=object), array([1, 2, 3], dtype=object)]
    >>> enc.transform([['Female', 3], ['Male', 1]])
    array([[0., 2.],
           [1., 0.]])

    >>> enc.inverse_transform([[1, 0], [0, 1]])
    array([['Male', 1],
           ['Female', 2]], dtype=object)

    By default, :class:`OrdinalEncoder` is lenient towards missing values by
    propagating them.

    >>> import numpy as np
    >>> X = [['Male', 1], ['Female', 3], ['Female', np.nan]]
    >>> enc.fit_transform(X)
    array([[ 1.,  0.],
           [ 0.,  1.],
           [ 0., nan]])

    You can use the parameter `encoded_missing_value` to encode missing values.

    >>> enc.set_params(encoded_missing_value=-1).fit_transform(X)
    array([[ 1.,  0.],
           [ 0.,  1.],
           [ 0., -1.]])

    Infrequent categories are enabled by setting `max_categories` or `min_frequency`.
    In the following example, "a" and "d" are considered infrequent and grouped
    together into a single category, "b" and "c" are their own categories, unknown
    values are encoded as 3 and missing values are encoded as 4.

    >>> X_train = np.array(
    ...     [["a"] * 5 + ["b"] * 20 + ["c"] * 10 + ["d"] * 3 + [np.nan]],
    ...     dtype=object).T
    >>> enc = OrdinalEncoder(
    ...     handle_unknown="use_encoded_value", unknown_value=3,
    ...     max_categories=3, encoded_missing_value=4)
    >>> _ = enc.fit(X_train)
    >>> X_test = np.array([["a"], ["b"], ["c"], ["d"], ["e"], [np.nan]], dtype=object)
    >>> enc.transform(X_test)
    array([[2.],
           [0.],
           [1.],
           [2.],
           [3.],
           [4.]])
    """

    _parameter_constraints: dict = {
        "categories": [StrOptions({"auto"}), list],
        "dtype": "no_validation",  # validation delegated to numpy
        "encoded_missing_value": [Integral, type(np.nan)],
        "handle_unknown": [StrOptions({"error", "use_encoded_value"})],
        "unknown_value": [Integral, type(np.nan), None],
        "max_categories": [Interval(Integral, 1, None, closed="left"), None],
        "min_frequency": [
            Interval(Integral, 1, None, closed="left"),
            Interval(RealNotInt, 0, 1, closed="neither"),
            None,
        ],
    }

    def __init__(
        self,
        *,
        categories="auto",
        dtype=np.float64,
        handle_unknown="error",
        unknown_value=None,
        encoded_missing_value=np.nan,
        min_frequency=None,
        max_categories=None,
    ):
        self.categories = categories
        self.dtype = dtype
        self.handle_unknown = handle_unknown
        self.unknown_value = unknown_value
        self.encoded_missing_value = encoded_missing_value
        self.min_frequency = min_frequency
        self.max_categories = max_categories

    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, X, y=None):
        """
        Fit the OrdinalEncoder to X.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            The data to determine the categories of each feature.

        y : None
            Ignored. This parameter exists only for compatibility with
            :class:`~sklearn.pipeline.Pipeline`.

        Returns
        -------
        self : object
            Fitted encoder.
        """
        if self.handle_unknown == "use_encoded_value":
            if is_scalar_nan(self.unknown_value):
                if np.dtype(self.dtype).kind != "f":
                    raise ValueError(
                        "When unknown_value is np.nan, the dtype "
                        "parameter should be "
                        f"a float dtype. Got {self.dtype}."
                    )
            elif not isinstance(self.unknown_value, numbers.Integral):
                raise TypeError(
                    "unknown_value should be an integer or "
                    "np.nan when "
                    "handle_unknown is 'use_encoded_value', "
                    f"got {self.unknown_value}."
                )
        elif self.unknown_value is not None:
            raise TypeError(
                "unknown_value should only be set when "
                "handle_unknown is 'use_encoded_value', "
                f"got {self.unknown_value}."
            )

        # `_fit` will only raise an error when `self.handle_unknown="error"`
        fit_results = self._fit(
            X,
            handle_unknown=self.handle_unknown,
            force_all_finite="allow-nan",
            return_and_ignore_missing_for_infrequent=True,
        )
        self._missing_indices = fit_results["missing_indices"]

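        # The checks below guard against ambiguous encodings: `unknown_value`
        # and `encoded_missing_value` must not collide with the ordinal codes
        # 0..cardinality-1 already assigned to the seen categories, otherwise
        # `inverse_transform` could not tell them apart.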
        cardinalities = [len(categories) for categories in self.categories_]
        if self._infrequent_enabled:
            # Cardinality decreases because the infrequent categories are grouped
            # together
            for feature_idx, infrequent in enumerate(self.infrequent_categories_):
                if infrequent is not None:
                    cardinalities[feature_idx] -= len(infrequent)

        # missing values are not considered part of the cardinality
        # when considering unknown categories or encoded_missing_value
        for cat_idx, categories_for_idx in enumerate(self.categories_):
            if is_scalar_nan(categories_for_idx[-1]):
                cardinalities[cat_idx] -= 1

        if self.handle_unknown == "use_encoded_value":
            for cardinality in cardinalities:
                if 0 <= self.unknown_value < cardinality:
                    raise ValueError(
                        "The used value for unknown_value "
                        f"{self.unknown_value} is one of the "
                        "values already used for encoding the "
                        "seen categories."
                    )

        if self._missing_indices:
            if np.dtype(self.dtype).kind != "f" and is_scalar_nan(
                self.encoded_missing_value
            ):
                raise ValueError(
                    "There are missing values in features "
                    f"{list(self._missing_indices)}. For OrdinalEncoder to "
                    f"encode missing values with dtype: {self.dtype}, set "
                    "encoded_missing_value to a non-nan value, or "
                    "set dtype to a float"
                )

            if not is_scalar_nan(self.encoded_missing_value):
                # Features are invalid when they contain a missing category
                # and encoded_missing_value was already used to encode a
                # known category
                invalid_features = [
                    cat_idx
                    for cat_idx, cardinality in enumerate(cardinalities)
                    if cat_idx in self._missing_indices
                    and 0 <= self.encoded_missing_value < cardinality
                ]

                if invalid_features:
                    # Use feature names if they are available
                    if hasattr(self, "feature_names_in_"):
                        invalid_features = self.feature_names_in_[invalid_features]
                    raise ValueError(
                        f"encoded_missing_value ({self.encoded_missing_value}) "
                        "is already used to encode a known category in features: "
                        f"{invalid_features}"
                    )

        return self

    def transform(self, X):
        """
        Transform X to ordinal codes.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            The data to encode.

        Returns
        -------
        X_out : ndarray of shape (n_samples, n_features)
            Transformed input.
        """
        check_is_fitted(self, "categories_")
        X_int, X_mask = self._transform(
            X,
            handle_unknown=self.handle_unknown,
            force_all_finite="allow-nan",
            ignore_category_indices=self._missing_indices,
        )
        X_trans = X_int.astype(self.dtype, copy=False)

        for cat_idx, missing_idx in self._missing_indices.items():
            X_missing_mask = X_int[:, cat_idx] == missing_idx
            X_trans[X_missing_mask, cat_idx] = self.encoded_missing_value

        # create separate category for unknown values
        if self.handle_unknown == "use_encoded_value":
            X_trans[~X_mask] = self.unknown_value
        return X_trans

    def inverse_transform(self, X):
        """
        Convert the data back to the original representation.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_encoded_features)
            The transformed data.

        Returns
        -------
        X_tr : ndarray of shape (n_samples, n_features)
            Inverse transformed array.
        """
        check_is_fitted(self)
        X = check_array(X, force_all_finite="allow-nan")

        n_samples, _ = X.shape
        n_features = len(self.categories_)

        # validate shape of passed X
        msg = (
            "Shape of the passed X data is not correct. Expected {0} columns, got {1}."
        )
        if X.shape[1] != n_features:
            raise ValueError(msg.format(n_features, X.shape[1]))

        # create resulting array of appropriate dtype
        dt = np.result_type(*[cat.dtype for cat in self.categories_])
        X_tr = np.empty((n_samples, n_features), dtype=dt)

        found_unknown = {}
        infrequent_masks = {}

        infrequent_indices = getattr(self, "_infrequent_indices", None)

        for i in range(n_features):
            labels = X[:, i]

            # replace values of X[:, i] that were nan with actual indices
            if i in self._missing_indices:
                X_i_mask = _get_mask(labels, self.encoded_missing_value)
                labels[X_i_mask] = self._missing_indices[i]

            rows_to_update = slice(None)
            categories = self.categories_[i]

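            # When infrequent grouping is active, all infrequent categories of
            # feature `i` share a single code placed right after the frequent
            # ones: e.g. 3 frequent out of 5 categories gives codes 0-2 for
            # the frequent categories and 3 for the grouped infrequent ones.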
            if infrequent_indices is not None and infrequent_indices[i] is not None:
                # Compute mask for frequent categories
                infrequent_encoding_value = len(categories) - len(infrequent_indices[i])
                infrequent_masks[i] = labels == infrequent_encoding_value
                rows_to_update = ~infrequent_masks[i]

                # Remap categories to be only frequent categories. The infrequent
                # categories will be mapped to "infrequent_sklearn" later
                frequent_categories_mask = np.ones_like(categories, dtype=bool)
                frequent_categories_mask[infrequent_indices[i]] = False
                categories = categories[frequent_categories_mask]

            if self.handle_unknown == "use_encoded_value":
                unknown_labels = _get_mask(labels, self.unknown_value)
                found_unknown[i] = unknown_labels

                known_labels = ~unknown_labels
                if isinstance(rows_to_update, np.ndarray):
                    rows_to_update &= known_labels
                else:
                    rows_to_update = known_labels

            labels_int = labels[rows_to_update].astype("int64", copy=False)
            X_tr[rows_to_update, i] = categories[labels_int]

        if found_unknown or infrequent_masks:
            X_tr = X_tr.astype(object, copy=False)

        # insert None values for unknown values
        if found_unknown:
            for idx, mask in found_unknown.items():
                X_tr[mask, idx] = None

        if infrequent_masks:
            for idx, mask in infrequent_masks.items():
                X_tr[mask, idx] = "infrequent_sklearn"

        return X_tr

env-llmeval/lib/python3.10/site-packages/sklearn/preprocessing/_function_transformer.py
ADDED
@@ -0,0 +1,431 @@
import warnings

import numpy as np

from ..base import BaseEstimator, TransformerMixin, _fit_context
from ..utils._param_validation import StrOptions
from ..utils._set_output import ADAPTERS_MANAGER, _get_output_config
from ..utils.metaestimators import available_if
from ..utils.validation import (
    _allclose_dense_sparse,
    _check_feature_names_in,
    _get_feature_names,
    _is_pandas_df,
    _is_polars_df,
    check_array,
)


def _get_adapter_from_container(container):
    """Get the adapter that knows how to handle such a container.

    See :class:`sklearn.utils._set_output.ContainerAdapterProtocol` for more
    details.
    """
    module_name = container.__class__.__module__.split(".")[0]
    try:
        return ADAPTERS_MANAGER.adapters[module_name]
    except KeyError as exc:
        available_adapters = list(ADAPTERS_MANAGER.adapters.keys())
        raise ValueError(
            "The container does not have a registered adapter in scikit-learn. "
            f"Available adapters are: {available_adapters} while the container "
            f"provided is: {container!r}."
        ) from exc


def _identity(X):
    """The identity function."""
    return X


class FunctionTransformer(TransformerMixin, BaseEstimator):
    """Constructs a transformer from an arbitrary callable.

    A FunctionTransformer forwards its X (and optionally y) arguments to a
    user-defined function or function object and returns the result of this
    function. This is useful for stateless transformations such as taking the
    log of frequencies, doing custom scaling, etc.

    Note: If a lambda is used as the function, then the resulting
    transformer will not be pickleable.

    .. versionadded:: 0.17

    Read more in the :ref:`User Guide <function_transformer>`.

    Parameters
    ----------
    func : callable, default=None
        The callable to use for the transformation. This will be passed
        the same arguments as transform, with args and kwargs forwarded.
        If func is None, then func will be the identity function.

    inverse_func : callable, default=None
        The callable to use for the inverse transformation. This will be
        passed the same arguments as inverse transform, with args and
        kwargs forwarded. If inverse_func is None, then inverse_func
        will be the identity function.

    validate : bool, default=False
        Indicate that the input X array should be checked before calling
        ``func``. The possibilities are:

        - If False, there is no input validation.
        - If True, then X will be converted to a 2-dimensional NumPy array or
          sparse matrix. If the conversion is not possible an exception is
          raised.

        .. versionchanged:: 0.22
           The default of ``validate`` changed from True to False.

    accept_sparse : bool, default=False
        Indicate that func accepts a sparse matrix as input. If validate is
        False, this has no effect. Otherwise, if accept_sparse is false,
        sparse matrix inputs will cause an exception to be raised.

    check_inverse : bool, default=True
        Whether to check that ``func`` followed by ``inverse_func`` leads to
        the original inputs. It can be used for a sanity check, raising a
        warning when the condition is not fulfilled.

        .. versionadded:: 0.20

    feature_names_out : callable, 'one-to-one' or None, default=None
        Determines the list of feature names that will be returned by the
        `get_feature_names_out` method. If it is 'one-to-one', then the output
        feature names will be equal to the input feature names. If it is a
        callable, then it must take two positional arguments: this
        `FunctionTransformer` (`self`) and an array-like of input feature names
        (`input_features`). It must return an array-like of output feature
        names. The `get_feature_names_out` method is only defined if
        `feature_names_out` is not None.

        See ``get_feature_names_out`` for more details.

        .. versionadded:: 1.1

    kw_args : dict, default=None
        Dictionary of additional keyword arguments to pass to func.

        .. versionadded:: 0.18

    inv_kw_args : dict, default=None
        Dictionary of additional keyword arguments to pass to inverse_func.

        .. versionadded:: 0.18

    Attributes
    ----------
    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X` has feature
        names that are all strings.

        .. versionadded:: 1.0

    See Also
    --------
    MaxAbsScaler : Scale each feature by its maximum absolute value.
    StandardScaler : Standardize features by removing the mean and
        scaling to unit variance.
    LabelBinarizer : Binarize labels in a one-vs-all fashion.
    MultiLabelBinarizer : Transform between iterable of iterables
        and a multilabel format.

    Notes
    -----
    If `func` returns an output with a `columns` attribute, then the columns
    are enforced to be consistent with the output of `get_feature_names_out`.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.preprocessing import FunctionTransformer
    >>> transformer = FunctionTransformer(np.log1p)
    >>> X = np.array([[0, 1], [2, 3]])
    >>> transformer.transform(X)
    array([[0.        , 0.6931...],
           [1.0986..., 1.3862...]])
    """

    _parameter_constraints: dict = {
        "func": [callable, None],
        "inverse_func": [callable, None],
        "validate": ["boolean"],
        "accept_sparse": ["boolean"],
        "check_inverse": ["boolean"],
        "feature_names_out": [callable, StrOptions({"one-to-one"}), None],
        "kw_args": [dict, None],
        "inv_kw_args": [dict, None],
    }

    def __init__(
        self,
        func=None,
        inverse_func=None,
        *,
        validate=False,
        accept_sparse=False,
        check_inverse=True,
        feature_names_out=None,
        kw_args=None,
        inv_kw_args=None,
    ):
        self.func = func
        self.inverse_func = inverse_func
        self.validate = validate
        self.accept_sparse = accept_sparse
        self.check_inverse = check_inverse
        self.feature_names_out = feature_names_out
        self.kw_args = kw_args
        self.inv_kw_args = inv_kw_args

    def _check_input(self, X, *, reset):
        if self.validate:
            return self._validate_data(X, accept_sparse=self.accept_sparse, reset=reset)
        elif reset:
            # Set feature_names_in_ and n_features_in_ even if validate=False
            # We run this only when reset==True to store the attributes but not
            # validate them, because validate=False
            self._check_n_features(X, reset=reset)
            self._check_feature_names(X, reset=reset)
        return X

199 |
+
def _check_inverse_transform(self, X):
|
200 |
+
"""Check that func and inverse_func are the inverse."""
|
201 |
+
idx_selected = slice(None, None, max(1, X.shape[0] // 100))
|
202 |
+
X_round_trip = self.inverse_transform(self.transform(X[idx_selected]))
|
203 |
+
|
204 |
+
if hasattr(X, "dtype"):
|
205 |
+
dtypes = [X.dtype]
|
206 |
+
elif hasattr(X, "dtypes"):
|
207 |
+
# Dataframes can have multiple dtypes
|
208 |
+
dtypes = X.dtypes
|
209 |
+
|
210 |
+
if not all(np.issubdtype(d, np.number) for d in dtypes):
|
211 |
+
raise ValueError(
|
212 |
+
"'check_inverse' is only supported when all the elements in `X` is"
|
213 |
+
" numerical."
|
214 |
+
)
|
215 |
+
|
216 |
+
if not _allclose_dense_sparse(X[idx_selected], X_round_trip):
|
217 |
+
warnings.warn(
|
218 |
+
(
|
219 |
+
"The provided functions are not strictly"
|
220 |
+
" inverse of each other. If you are sure you"
|
221 |
+
" want to proceed regardless, set"
|
222 |
+
" 'check_inverse=False'."
|
223 |
+
),
|
224 |
+
UserWarning,
|
225 |
+
)
|
226 |
+
|
227 |
+
@_fit_context(prefer_skip_nested_validation=True)
|
228 |
+
def fit(self, X, y=None):
|
229 |
+
"""Fit transformer by checking X.
|
230 |
+
|
231 |
+
If ``validate`` is ``True``, ``X`` will be checked.
|
232 |
+
|
233 |
+
Parameters
|
234 |
+
----------
|
235 |
+
X : {array-like, sparse-matrix} of shape (n_samples, n_features) \
|
236 |
+
if `validate=True` else any object that `func` can handle
|
237 |
+
Input array.
|
238 |
+
|
239 |
+
y : Ignored
|
240 |
+
Not used, present here for API consistency by convention.
|
241 |
+
|
242 |
+
Returns
|
243 |
+
-------
|
244 |
+
self : object
|
245 |
+
FunctionTransformer class instance.
|
246 |
+
"""
|
247 |
+
X = self._check_input(X, reset=True)
|
248 |
+
if self.check_inverse and not (self.func is None or self.inverse_func is None):
|
249 |
+
self._check_inverse_transform(X)
|
250 |
+
return self
|
251 |
+
|
252 |
+
def transform(self, X):
|
253 |
+
"""Transform X using the forward function.
|
254 |
+
|
255 |
+
Parameters
|
256 |
+
----------
|
257 |
+
X : {array-like, sparse-matrix} of shape (n_samples, n_features) \
|
258 |
+
if `validate=True` else any object that `func` can handle
|
259 |
+
Input array.
|
260 |
+
|
261 |
+
Returns
|
262 |
+
-------
|
263 |
+
X_out : array-like, shape (n_samples, n_features)
|
264 |
+
Transformed input.
|
265 |
+
"""
|
266 |
+
X = self._check_input(X, reset=False)
|
267 |
+
out = self._transform(X, func=self.func, kw_args=self.kw_args)
|
268 |
+
output_config = _get_output_config("transform", self)["dense"]
|
269 |
+
|
270 |
+
if hasattr(out, "columns") and self.feature_names_out is not None:
|
271 |
+
# check the consistency between the column provided by `transform` and
|
272 |
+
# the the column names provided by `get_feature_names_out`.
|
273 |
+
feature_names_out = self.get_feature_names_out()
|
274 |
+
if list(out.columns) != list(feature_names_out):
|
275 |
+
# we can override the column names of the output if it is inconsistent
|
276 |
+
# with the column names provided by `get_feature_names_out` in the
|
277 |
+
# following cases:
|
278 |
+
# * `func` preserved the column names between the input and the output
|
279 |
+
# * the input column names are all numbers
|
280 |
+
# * the output is requested to be a DataFrame (pandas or polars)
|
281 |
+
feature_names_in = getattr(
|
282 |
+
X, "feature_names_in_", _get_feature_names(X)
|
283 |
+
)
|
284 |
+
same_feature_names_in_out = feature_names_in is not None and list(
|
285 |
+
feature_names_in
|
286 |
+
) == list(out.columns)
|
287 |
+
not_all_str_columns = not all(
|
288 |
+
isinstance(col, str) for col in out.columns
|
289 |
+
)
|
290 |
+
if same_feature_names_in_out or not_all_str_columns:
|
291 |
+
adapter = _get_adapter_from_container(out)
|
292 |
+
out = adapter.create_container(
|
293 |
+
X_output=out,
|
294 |
+
X_original=out,
|
295 |
+
columns=feature_names_out,
|
296 |
+
inplace=False,
|
297 |
+
)
|
298 |
+
else:
|
299 |
+
raise ValueError(
|
300 |
+
"The output generated by `func` have different column names "
|
301 |
+
"than the ones provided by `get_feature_names_out`. "
|
302 |
+
f"Got output with columns names: {list(out.columns)} and "
|
303 |
+
"`get_feature_names_out` returned: "
|
304 |
+
f"{list(self.get_feature_names_out())}. "
|
305 |
+
"The column names can be overridden by setting "
|
306 |
+
"`set_output(transform='pandas')` or "
|
307 |
+
"`set_output(transform='polars')` such that the column names "
|
308 |
+
"are set to the names provided by `get_feature_names_out`."
|
309 |
+
)
|
310 |
+
|
311 |
+
if self.feature_names_out is None:
|
312 |
+
warn_msg = (
|
313 |
+
"When `set_output` is configured to be '{0}', `func` should return "
|
314 |
+
"a {0} DataFrame to follow the `set_output` API or `feature_names_out`"
|
315 |
+
" should be defined."
|
316 |
+
)
|
317 |
+
if output_config == "pandas" and not _is_pandas_df(out):
|
318 |
+
warnings.warn(warn_msg.format("pandas"))
|
319 |
+
elif output_config == "polars" and not _is_polars_df(out):
|
320 |
+
warnings.warn(warn_msg.format("polars"))
|
321 |
+
|
322 |
+
return out
|
323 |
+
|
324 |
+
def inverse_transform(self, X):
|
325 |
+
"""Transform X using the inverse function.
|
326 |
+
|
327 |
+
Parameters
|
328 |
+
----------
|
329 |
+
X : {array-like, sparse-matrix} of shape (n_samples, n_features) \
|
330 |
+
if `validate=True` else any object that `inverse_func` can handle
|
331 |
+
Input array.
|
332 |
+
|
333 |
+
Returns
|
334 |
+
-------
|
335 |
+
X_out : array-like, shape (n_samples, n_features)
|
336 |
+
Transformed input.
|
337 |
+
"""
|
338 |
+
if self.validate:
|
339 |
+
X = check_array(X, accept_sparse=self.accept_sparse)
|
340 |
+
return self._transform(X, func=self.inverse_func, kw_args=self.inv_kw_args)
|
341 |
+
|
342 |
+
@available_if(lambda self: self.feature_names_out is not None)
|
343 |
+
def get_feature_names_out(self, input_features=None):
|
344 |
+
"""Get output feature names for transformation.
|
345 |
+
|
346 |
+
This method is only defined if `feature_names_out` is not None.
|
347 |
+
|
348 |
+
Parameters
|
349 |
+
----------
|
350 |
+
input_features : array-like of str or None, default=None
|
351 |
+
Input feature names.
|
352 |
+
|
353 |
+
- If `input_features` is None, then `feature_names_in_` is
|
354 |
+
used as the input feature names. If `feature_names_in_` is not
|
355 |
+
defined, then names are generated:
|
356 |
+
`[x0, x1, ..., x(n_features_in_ - 1)]`.
|
357 |
+
- If `input_features` is array-like, then `input_features` must
|
358 |
+
match `feature_names_in_` if `feature_names_in_` is defined.
|
359 |
+
|
360 |
+
Returns
|
361 |
+
-------
|
362 |
+
feature_names_out : ndarray of str objects
|
363 |
+
Transformed feature names.
|
364 |
+
|
365 |
+
- If `feature_names_out` is 'one-to-one', the input feature names
|
366 |
+
are returned (see `input_features` above). This requires
|
367 |
+
`feature_names_in_` and/or `n_features_in_` to be defined, which
|
368 |
+
is done automatically if `validate=True`. Alternatively, you can
|
369 |
+
set them in `func`.
|
370 |
+
- If `feature_names_out` is a callable, then it is called with two
|
371 |
+
arguments, `self` and `input_features`, and its return value is
|
372 |
+
returned by this method.
|
373 |
+
"""
|
374 |
+
if hasattr(self, "n_features_in_") or input_features is not None:
|
375 |
+
input_features = _check_feature_names_in(self, input_features)
|
376 |
+
if self.feature_names_out == "one-to-one":
|
377 |
+
names_out = input_features
|
378 |
+
elif callable(self.feature_names_out):
|
379 |
+
names_out = self.feature_names_out(self, input_features)
|
380 |
+
else:
|
381 |
+
raise ValueError(
|
382 |
+
f"feature_names_out={self.feature_names_out!r} is invalid. "
|
383 |
+
'It must either be "one-to-one" or a callable with two '
|
384 |
+
"arguments: the function transformer and an array-like of "
|
385 |
+
"input feature names. The callable must return an array-like "
|
386 |
+
"of output feature names."
|
387 |
+
)
|
388 |
+
return np.asarray(names_out, dtype=object)
|
389 |
+
|
390 |
+
def _transform(self, X, func=None, kw_args=None):
|
391 |
+
if func is None:
|
392 |
+
func = _identity
|
393 |
+
|
394 |
+
return func(X, **(kw_args if kw_args else {}))
|
395 |
+
|
396 |
+
def __sklearn_is_fitted__(self):
|
397 |
+
"""Return True since FunctionTransfomer is stateless."""
|
398 |
+
return True
|
399 |
+
|
400 |
+
def _more_tags(self):
|
401 |
+
return {"no_validation": not self.validate, "stateless": True}
|
402 |
+
|
403 |
+
def set_output(self, *, transform=None):
|
404 |
+
"""Set output container.
|
405 |
+
|
406 |
+
See :ref:`sphx_glr_auto_examples_miscellaneous_plot_set_output.py`
|
407 |
+
for an example on how to use the API.
|
408 |
+
|
409 |
+
Parameters
|
410 |
+
----------
|
411 |
+
transform : {"default", "pandas"}, default=None
|
412 |
+
Configure output of `transform` and `fit_transform`.
|
413 |
+
|
414 |
+
- `"default"`: Default output format of a transformer
|
415 |
+
- `"pandas"`: DataFrame output
|
416 |
+
- `"polars"`: Polars output
|
417 |
+
- `None`: Transform configuration is unchanged
|
418 |
+
|
419 |
+
.. versionadded:: 1.4
|
420 |
+
`"polars"` option was added.
|
421 |
+
|
422 |
+
Returns
|
423 |
+
-------
|
424 |
+
self : estimator instance
|
425 |
+
Estimator instance.
|
426 |
+
"""
|
427 |
+
if not hasattr(self, "_sklearn_output_config"):
|
428 |
+
self._sklearn_output_config = {}
|
429 |
+
|
430 |
+
self._sklearn_output_config["transform"] = transform
|
431 |
+
return self
|
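
# ---------------------------------------------------------------------------
# A minimal usage sketch (illustrative, not part of the original module):
# exercising `check_inverse` and `feature_names_out` as documented above.
# Assumes scikit-learn >= 1.1, where `feature_names_out` was added.
#
#   >>> import numpy as np
#   >>> from sklearn.preprocessing import FunctionTransformer
#   >>> transformer = FunctionTransformer(
#   ...     func=np.log1p,
#   ...     inverse_func=np.expm1,  # exact inverse: fit raises no UserWarning
#   ...     check_inverse=True,
#   ...     feature_names_out="one-to-one",
#   ... )
#   >>> X = np.array([[0.0, 1.0], [2.0, 3.0]])
#   >>> Xt = transformer.fit_transform(X)
#   >>> np.allclose(transformer.inverse_transform(Xt), X)
#   True
#   >>> transformer.get_feature_names_out(["a", "b"])
#   array(['a', 'b'], dtype=object)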
env-llmeval/lib/python3.10/site-packages/sklearn/preprocessing/_label.py
ADDED
@@ -0,0 +1,951 @@
# Authors: Alexandre Gramfort <[email protected]>
#          Mathieu Blondel <[email protected]>
#          Olivier Grisel <[email protected]>
#          Andreas Mueller <[email protected]>
#          Joel Nothman <[email protected]>
#          Hamzeh Alsalhi <[email protected]>
# License: BSD 3 clause

import array
import itertools
import warnings
from collections import defaultdict
from numbers import Integral

import numpy as np
import scipy.sparse as sp

from ..base import BaseEstimator, TransformerMixin, _fit_context
from ..utils import column_or_1d
from ..utils._encode import _encode, _unique
from ..utils._param_validation import Interval, validate_params
from ..utils.multiclass import type_of_target, unique_labels
from ..utils.sparsefuncs import min_max_axis
from ..utils.validation import _num_samples, check_array, check_is_fitted

__all__ = [
    "label_binarize",
    "LabelBinarizer",
    "LabelEncoder",
    "MultiLabelBinarizer",
]


class LabelEncoder(TransformerMixin, BaseEstimator, auto_wrap_output_keys=None):
    """Encode target labels with value between 0 and n_classes-1.

    This transformer should be used to encode target values, *i.e.* `y`, and
    not the input `X`.

    Read more in the :ref:`User Guide <preprocessing_targets>`.

    .. versionadded:: 0.12

    Attributes
    ----------
    classes_ : ndarray of shape (n_classes,)
        Holds the label for each class.

    See Also
    --------
    OrdinalEncoder : Encode categorical features using an ordinal encoding
        scheme.
    OneHotEncoder : Encode categorical features as a one-hot numeric array.

    Examples
    --------
    `LabelEncoder` can be used to normalize labels.

    >>> from sklearn.preprocessing import LabelEncoder
    >>> le = LabelEncoder()
    >>> le.fit([1, 2, 2, 6])
    LabelEncoder()
    >>> le.classes_
    array([1, 2, 6])
    >>> le.transform([1, 1, 2, 6])
    array([0, 0, 1, 2]...)
    >>> le.inverse_transform([0, 0, 1, 2])
    array([1, 1, 2, 6])

    It can also be used to transform non-numerical labels (as long as they are
    hashable and comparable) to numerical labels.

    >>> le = LabelEncoder()
    >>> le.fit(["paris", "paris", "tokyo", "amsterdam"])
    LabelEncoder()
    >>> list(le.classes_)
    ['amsterdam', 'paris', 'tokyo']
    >>> le.transform(["tokyo", "tokyo", "paris"])
    array([2, 2, 1]...)
    >>> list(le.inverse_transform([2, 2, 1]))
    ['tokyo', 'tokyo', 'paris']
    """

    def fit(self, y):
        """Fit label encoder.

        Parameters
        ----------
        y : array-like of shape (n_samples,)
            Target values.

        Returns
        -------
        self : object
            Fitted label encoder.
        """
        y = column_or_1d(y, warn=True)
        self.classes_ = _unique(y)
        return self

    def fit_transform(self, y):
        """Fit label encoder and return encoded labels.

        Parameters
        ----------
        y : array-like of shape (n_samples,)
            Target values.

        Returns
        -------
        y : array-like of shape (n_samples,)
            Encoded labels.
        """
        y = column_or_1d(y, warn=True)
        self.classes_, y = _unique(y, return_inverse=True)
        return y

    def transform(self, y):
        """Transform labels to normalized encoding.

        Parameters
        ----------
        y : array-like of shape (n_samples,)
            Target values.

        Returns
        -------
        y : array-like of shape (n_samples,)
            Labels as normalized encodings.
        """
        check_is_fitted(self)
        y = column_or_1d(y, dtype=self.classes_.dtype, warn=True)
        # transform of empty array is empty array
        if _num_samples(y) == 0:
            return np.array([])

        return _encode(y, uniques=self.classes_)

    def inverse_transform(self, y):
        """Transform labels back to original encoding.

        Parameters
        ----------
        y : ndarray of shape (n_samples,)
            Target values.

        Returns
        -------
        y : ndarray of shape (n_samples,)
            Original encoding.
        """
        check_is_fitted(self)
        y = column_or_1d(y, warn=True)
        # inverse transform of empty array is empty array
        if _num_samples(y) == 0:
            return np.array([])

        diff = np.setdiff1d(y, np.arange(len(self.classes_)))
        if len(diff):
            raise ValueError("y contains previously unseen labels: %s" % str(diff))
        y = np.asarray(y)
        return self.classes_[y]

    def _more_tags(self):
        return {"X_types": ["1dlabels"]}

class LabelBinarizer(TransformerMixin, BaseEstimator, auto_wrap_output_keys=None):
    """Binarize labels in a one-vs-all fashion.

    Several regression and binary classification algorithms are
    available in scikit-learn. A simple way to extend these algorithms
    to the multi-class classification case is to use the so-called
    one-vs-all scheme.

    At learning time, this simply consists in learning one regressor
    or binary classifier per class. In doing so, one needs to convert
    multi-class labels to binary labels (belongs or does not belong
    to the class). `LabelBinarizer` makes this process easy with the
    transform method.

    At prediction time, one assigns the class for which the corresponding
    model gave the greatest confidence. `LabelBinarizer` makes this easy
    with the :meth:`inverse_transform` method.

    Read more in the :ref:`User Guide <preprocessing_targets>`.

    Parameters
    ----------
    neg_label : int, default=0
        Value with which negative labels must be encoded.

    pos_label : int, default=1
        Value with which positive labels must be encoded.

    sparse_output : bool, default=False
        True if the returned array from transform is desired to be in sparse
        CSR format.

    Attributes
    ----------
    classes_ : ndarray of shape (n_classes,)
        Holds the label for each class.

    y_type_ : str
        Represents the type of the target data as evaluated by
        :func:`~sklearn.utils.multiclass.type_of_target`. Possible types are
        'continuous', 'continuous-multioutput', 'binary', 'multiclass',
        'multiclass-multioutput', 'multilabel-indicator', and 'unknown'.

    sparse_input_ : bool
        `True` if the input data to transform is given as a sparse matrix,
        `False` otherwise.

    See Also
    --------
    label_binarize : Function to perform the transform operation of
        LabelBinarizer with fixed classes.
    OneHotEncoder : Encode categorical features using a one-hot aka one-of-K
        scheme.

    Examples
    --------
    >>> from sklearn.preprocessing import LabelBinarizer
    >>> lb = LabelBinarizer()
    >>> lb.fit([1, 2, 6, 4, 2])
    LabelBinarizer()
    >>> lb.classes_
    array([1, 2, 4, 6])
    >>> lb.transform([1, 6])
    array([[1, 0, 0, 0],
           [0, 0, 0, 1]])

    Binary targets transform to a column vector

    >>> lb = LabelBinarizer()
    >>> lb.fit_transform(['yes', 'no', 'no', 'yes'])
    array([[1],
           [0],
           [0],
           [1]])

    Passing a 2D matrix for multilabel classification

    >>> import numpy as np
    >>> lb.fit(np.array([[0, 1, 1], [1, 0, 0]]))
    LabelBinarizer()
    >>> lb.classes_
    array([0, 1, 2])
    >>> lb.transform([0, 1, 2, 1])
    array([[1, 0, 0],
           [0, 1, 0],
           [0, 0, 1],
           [0, 1, 0]])
    """

    _parameter_constraints: dict = {
        "neg_label": [Integral],
        "pos_label": [Integral],
        "sparse_output": ["boolean"],
    }

    def __init__(self, *, neg_label=0, pos_label=1, sparse_output=False):
        self.neg_label = neg_label
        self.pos_label = pos_label
        self.sparse_output = sparse_output

    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, y):
        """Fit label binarizer.

        Parameters
        ----------
        y : ndarray of shape (n_samples,) or (n_samples, n_classes)
            Target values. The 2-d matrix should only contain 0 and 1, and
            represents multilabel classification.

        Returns
        -------
        self : object
            Returns the instance itself.
        """
        if self.neg_label >= self.pos_label:
            raise ValueError(
                f"neg_label={self.neg_label} must be strictly less than "
                f"pos_label={self.pos_label}."
            )

        if self.sparse_output and (self.pos_label == 0 or self.neg_label != 0):
            raise ValueError(
                "Sparse binarization is only supported with non "
                "zero pos_label and zero neg_label, got "
                f"pos_label={self.pos_label} and neg_label={self.neg_label}"
            )

        self.y_type_ = type_of_target(y, input_name="y")

        if "multioutput" in self.y_type_:
            raise ValueError(
                "Multioutput target data is not supported with label binarization"
            )
        if _num_samples(y) == 0:
            raise ValueError("y has 0 samples: %r" % y)

        self.sparse_input_ = sp.issparse(y)
        self.classes_ = unique_labels(y)
        return self

    def fit_transform(self, y):
        """Fit label binarizer/transform multi-class labels to binary labels.

        The output of transform is sometimes referred to as
        the 1-of-K coding scheme.

        Parameters
        ----------
        y : {ndarray, sparse matrix} of shape (n_samples,) or \
                (n_samples, n_classes)
            Target values. The 2-d matrix should only contain 0 and 1, and
            represents multilabel classification. Sparse matrix can be
            CSR, CSC, COO, DOK, or LIL.

        Returns
        -------
        Y : {ndarray, sparse matrix} of shape (n_samples, n_classes)
            Shape will be (n_samples, 1) for binary problems. Sparse matrix
            will be of CSR format.
        """
        return self.fit(y).transform(y)

    def transform(self, y):
        """Transform multi-class labels to binary labels.

        The output of transform is sometimes referred to by some authors as
        the 1-of-K coding scheme.

        Parameters
        ----------
        y : {array, sparse matrix} of shape (n_samples,) or \
                (n_samples, n_classes)
            Target values. The 2-d matrix should only contain 0 and 1, and
            represents multilabel classification. Sparse matrix can be
            CSR, CSC, COO, DOK, or LIL.

        Returns
        -------
        Y : {ndarray, sparse matrix} of shape (n_samples, n_classes)
            Shape will be (n_samples, 1) for binary problems. Sparse matrix
            will be of CSR format.
        """
        check_is_fitted(self)

        y_is_multilabel = type_of_target(y).startswith("multilabel")
        if y_is_multilabel and not self.y_type_.startswith("multilabel"):
            raise ValueError("The object was not fitted with multilabel input.")

        return label_binarize(
            y,
            classes=self.classes_,
            pos_label=self.pos_label,
            neg_label=self.neg_label,
            sparse_output=self.sparse_output,
        )

    def inverse_transform(self, Y, threshold=None):
        """Transform binary labels back to multi-class labels.

        Parameters
        ----------
        Y : {ndarray, sparse matrix} of shape (n_samples, n_classes)
            Target values. All sparse matrices are converted to CSR before
            inverse transformation.

        threshold : float, default=None
            Threshold used in the binary and multi-label cases.

            Use 0 when ``Y`` contains the output of :term:`decision_function`
            (classifier).
            Use 0.5 when ``Y`` contains the output of :term:`predict_proba`.

            If None, the threshold is assumed to be half way between
            neg_label and pos_label.

        Returns
        -------
        y : {ndarray, sparse matrix} of shape (n_samples,)
            Target values. Sparse matrix will be of CSR format.

        Notes
        -----
        In the case when the binary labels are fractional
        (probabilistic), :meth:`inverse_transform` chooses the class with the
        greatest value. Typically, this allows using the output of a
        linear model's :term:`decision_function` method directly as the input
        of :meth:`inverse_transform`.
        """
        check_is_fitted(self)

        if threshold is None:
            threshold = (self.pos_label + self.neg_label) / 2.0

        if self.y_type_ == "multiclass":
            y_inv = _inverse_binarize_multiclass(Y, self.classes_)
        else:
            y_inv = _inverse_binarize_thresholding(
                Y, self.y_type_, self.classes_, threshold
            )

        if self.sparse_input_:
            y_inv = sp.csr_matrix(y_inv)
        elif sp.issparse(y_inv):
            y_inv = y_inv.toarray()

        return y_inv

    def _more_tags(self):
        return {"X_types": ["1dlabels"]}

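
# ---------------------------------------------------------------------------
# A minimal sketch (illustrative, not part of the original module) of how the
# `threshold` argument of `LabelBinarizer.inverse_transform` is used with
# probability-like scores:
#
#   >>> import numpy as np
#   >>> from sklearn.preprocessing import LabelBinarizer
#   >>> lb = LabelBinarizer().fit(["no", "yes"])
#   >>> scores = np.array([[0.2], [0.9]])  # e.g. predict_proba of class "yes"
#   >>> lb.inverse_transform(scores, threshold=0.5)
#   array(['no', 'yes'], dtype='<U3')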
@validate_params(
    {
        "y": ["array-like"],
        "classes": ["array-like"],
        "neg_label": [Interval(Integral, None, None, closed="neither")],
        "pos_label": [Interval(Integral, None, None, closed="neither")],
        "sparse_output": ["boolean"],
    },
    prefer_skip_nested_validation=True,
)
def label_binarize(y, *, classes, neg_label=0, pos_label=1, sparse_output=False):
    """Binarize labels in a one-vs-all fashion.

    Several regression and binary classification algorithms are
    available in scikit-learn. A simple way to extend these algorithms
    to the multi-class classification case is to use the so-called
    one-vs-all scheme.

    This function makes it possible to compute this transformation for a
    fixed set of class labels known ahead of time.

    Parameters
    ----------
    y : array-like
        Sequence of integer labels or multilabel data to encode.

    classes : array-like of shape (n_classes,)
        Uniquely holds the label for each class.

    neg_label : int, default=0
        Value with which negative labels must be encoded.

    pos_label : int, default=1
        Value with which positive labels must be encoded.

    sparse_output : bool, default=False
        Set to true if output binary array is desired in CSR sparse format.

    Returns
    -------
    Y : {ndarray, sparse matrix} of shape (n_samples, n_classes)
        Shape will be (n_samples, 1) for binary problems. Sparse matrix will
        be of CSR format.

    See Also
    --------
    LabelBinarizer : Class used to wrap the functionality of label_binarize and
        allow for fitting to classes independently of the transform operation.

    Examples
    --------
    >>> from sklearn.preprocessing import label_binarize
    >>> label_binarize([1, 6], classes=[1, 2, 4, 6])
    array([[1, 0, 0, 0],
           [0, 0, 0, 1]])

    The class ordering is preserved:

    >>> label_binarize([1, 6], classes=[1, 6, 4, 2])
    array([[1, 0, 0, 0],
           [0, 1, 0, 0]])

    Binary targets transform to a column vector

    >>> label_binarize(['yes', 'no', 'no', 'yes'], classes=['no', 'yes'])
    array([[1],
           [0],
           [0],
           [1]])
    """
    if not isinstance(y, list):
        # XXX Workaround that will be removed when list of list format is
        # dropped
        y = check_array(
            y, input_name="y", accept_sparse="csr", ensure_2d=False, dtype=None
        )
    else:
        if _num_samples(y) == 0:
            raise ValueError("y has 0 samples: %r" % y)
    if neg_label >= pos_label:
        raise ValueError(
            "neg_label={0} must be strictly less than pos_label={1}.".format(
                neg_label, pos_label
            )
        )

    if sparse_output and (pos_label == 0 or neg_label != 0):
        raise ValueError(
            "Sparse binarization is only supported with non "
            "zero pos_label and zero neg_label, got "
            "pos_label={0} and neg_label={1}"
            "".format(pos_label, neg_label)
        )

    # To account for pos_label == 0 in the dense case
    pos_switch = pos_label == 0
    if pos_switch:
        pos_label = -neg_label

    y_type = type_of_target(y)
    if "multioutput" in y_type:
        raise ValueError(
            "Multioutput target data is not supported with label binarization"
        )
    if y_type == "unknown":
        raise ValueError("The type of target data is not known")

    n_samples = y.shape[0] if sp.issparse(y) else len(y)
    n_classes = len(classes)
    classes = np.asarray(classes)

    if y_type == "binary":
        if n_classes == 1:
            if sparse_output:
                return sp.csr_matrix((n_samples, 1), dtype=int)
            else:
                Y = np.zeros((len(y), 1), dtype=int)
                Y += neg_label
                return Y
        elif len(classes) >= 3:
            y_type = "multiclass"

    sorted_class = np.sort(classes)
    if y_type == "multilabel-indicator":
        y_n_classes = y.shape[1] if hasattr(y, "shape") else len(y[0])
        if classes.size != y_n_classes:
            raise ValueError(
                "classes {0} mismatch with the labels {1} found in the data".format(
                    classes, unique_labels(y)
                )
            )

    if y_type in ("binary", "multiclass"):
        y = column_or_1d(y)

        # pick out the known labels from y
        y_in_classes = np.isin(y, classes)
        y_seen = y[y_in_classes]
        indices = np.searchsorted(sorted_class, y_seen)
        indptr = np.hstack((0, np.cumsum(y_in_classes)))

        data = np.empty_like(indices)
        data.fill(pos_label)
        Y = sp.csr_matrix((data, indices, indptr), shape=(n_samples, n_classes))
    elif y_type == "multilabel-indicator":
        Y = sp.csr_matrix(y)
        if pos_label != 1:
            data = np.empty_like(Y.data)
            data.fill(pos_label)
            Y.data = data
    else:
        raise ValueError(
            "%s target data is not supported with label binarization" % y_type
        )

    if not sparse_output:
        Y = Y.toarray()
        Y = Y.astype(int, copy=False)

        if neg_label != 0:
            Y[Y == 0] = neg_label

        if pos_switch:
            Y[Y == pos_label] = 0
    else:
        Y.data = Y.data.astype(int, copy=False)

    # preserve label ordering
    if np.any(classes != sorted_class):
        indices = np.searchsorted(sorted_class, classes)
        Y = Y[:, indices]

    if y_type == "binary":
        if sparse_output:
            Y = Y.getcol(-1)
        else:
            Y = Y[:, -1].reshape((-1, 1))

    return Y

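
# ---------------------------------------------------------------------------
# A worked sketch (illustrative, not part of the original module) of the
# `pos_switch` branch above: with `pos_label=0`, positive entries are first
# written with `-neg_label` and flipped back to 0 at the end.
#
#   >>> from sklearn.preprocessing import label_binarize
#   >>> label_binarize([1, 2], classes=[1, 2, 3], pos_label=0, neg_label=-1)
#   array([[ 0, -1, -1],
#          [-1,  0, -1]])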
def _inverse_binarize_multiclass(y, classes):
    """Inverse label binarization transformation for multiclass.

    Multiclass uses the maximal score instead of a threshold.
    """
    classes = np.asarray(classes)

    if sp.issparse(y):
        # Find the argmax for each row in y where y is a CSR matrix

        y = y.tocsr()
        n_samples, n_outputs = y.shape
        outputs = np.arange(n_outputs)
        row_max = min_max_axis(y, 1)[1]
        row_nnz = np.diff(y.indptr)

        y_data_repeated_max = np.repeat(row_max, row_nnz)
        # picks out all indices obtaining the maximum per row
        y_i_all_argmax = np.flatnonzero(y_data_repeated_max == y.data)

        # For corner case where last row has a max of 0
        if row_max[-1] == 0:
            y_i_all_argmax = np.append(y_i_all_argmax, [len(y.data)])

        # Gets the index of the first argmax in each row from y_i_all_argmax
        index_first_argmax = np.searchsorted(y_i_all_argmax, y.indptr[:-1])
        # first argmax of each row
        y_ind_ext = np.append(y.indices, [0])
        y_i_argmax = y_ind_ext[y_i_all_argmax[index_first_argmax]]
        # Handle rows of all 0
        y_i_argmax[np.where(row_nnz == 0)[0]] = 0

        # Handles rows with max of 0 that contain negative numbers
        samples = np.arange(n_samples)[(row_nnz > 0) & (row_max.ravel() == 0)]
        for i in samples:
            ind = y.indices[y.indptr[i] : y.indptr[i + 1]]
            y_i_argmax[i] = classes[np.setdiff1d(outputs, ind)][0]

        return classes[y_i_argmax]
    else:
        return classes.take(y.argmax(axis=1), mode="clip")

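
# ---------------------------------------------------------------------------
# A small sketch (illustrative, not part of the original module): the dense
# branch above is a plain per-row argmax mapped back through `classes`, with
# `mode="clip"` guarding against out-of-range indices.
#
#   >>> import numpy as np
#   >>> classes = np.array(["a", "b", "c"])
#   >>> scores = np.array([[0.1, 0.7, 0.2], [0.9, 0.05, 0.05]])
#   >>> classes.take(scores.argmax(axis=1), mode="clip")
#   array(['b', 'a'], dtype='<U1')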
def _inverse_binarize_thresholding(y, output_type, classes, threshold):
    """Inverse label binarization transformation using thresholding."""

    if output_type == "binary" and y.ndim == 2 and y.shape[1] > 2:
        raise ValueError("output_type='binary', but y.shape = {0}".format(y.shape))

    if output_type != "binary" and y.shape[1] != len(classes):
        raise ValueError(
            "The number of classes is not equal to the number of dimensions of y."
        )

    classes = np.asarray(classes)

    # Perform thresholding
    if sp.issparse(y):
        if threshold > 0:
            if y.format not in ("csr", "csc"):
                y = y.tocsr()
            y.data = np.array(y.data > threshold, dtype=int)
            y.eliminate_zeros()
        else:
            y = np.array(y.toarray() > threshold, dtype=int)
    else:
        y = np.array(y > threshold, dtype=int)

    # Inverse transform data
    if output_type == "binary":
        if sp.issparse(y):
            y = y.toarray()
        if y.ndim == 2 and y.shape[1] == 2:
            return classes[y[:, 1]]
        else:
            if len(classes) == 1:
                return np.repeat(classes[0], len(y))
            else:
                return classes[y.ravel()]

    elif output_type == "multilabel-indicator":
        return y

    else:
        raise ValueError("{0} format is not supported".format(output_type))


class MultiLabelBinarizer(TransformerMixin, BaseEstimator, auto_wrap_output_keys=None):
    """Transform between iterable of iterables and a multilabel format.

    Although a list of sets or tuples is a very intuitive format for multilabel
    data, it is unwieldy to process. This transformer converts between this
    intuitive format and the supported multilabel format: a (samples x classes)
    binary matrix indicating the presence of a class label.

    Parameters
    ----------
    classes : array-like of shape (n_classes,), default=None
        Indicates an ordering for the class labels.
        All entries should be unique (cannot contain duplicate classes).

    sparse_output : bool, default=False
        Set to True if output binary array is desired in CSR sparse format.

    Attributes
    ----------
    classes_ : ndarray of shape (n_classes,)
        A copy of the `classes` parameter when provided.
        Otherwise it corresponds to the sorted set of classes found
        when fitting.

    See Also
    --------
    OneHotEncoder : Encode categorical features using a one-hot aka one-of-K
        scheme.

    Examples
    --------
    >>> from sklearn.preprocessing import MultiLabelBinarizer
    >>> mlb = MultiLabelBinarizer()
    >>> mlb.fit_transform([(1, 2), (3,)])
    array([[1, 1, 0],
           [0, 0, 1]])
    >>> mlb.classes_
    array([1, 2, 3])

    >>> mlb.fit_transform([{'sci-fi', 'thriller'}, {'comedy'}])
    array([[0, 1, 1],
           [1, 0, 0]])
    >>> list(mlb.classes_)
    ['comedy', 'sci-fi', 'thriller']

    A common mistake is to pass in a list, which leads to the following issue:

    >>> mlb = MultiLabelBinarizer()
    >>> mlb.fit(['sci-fi', 'thriller', 'comedy'])
    MultiLabelBinarizer()
    >>> mlb.classes_
    array(['-', 'c', 'd', 'e', 'f', 'h', 'i', 'l', 'm', 'o', 'r', 's', 't',
           'y'], dtype=object)

    To correct this, the list of labels should be passed in as:

    >>> mlb = MultiLabelBinarizer()
    >>> mlb.fit([['sci-fi', 'thriller', 'comedy']])
    MultiLabelBinarizer()
    >>> mlb.classes_
    array(['comedy', 'sci-fi', 'thriller'], dtype=object)
    """

    _parameter_constraints: dict = {
        "classes": ["array-like", None],
        "sparse_output": ["boolean"],
    }

    def __init__(self, *, classes=None, sparse_output=False):
        self.classes = classes
        self.sparse_output = sparse_output

    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, y):
        """Fit the label sets binarizer, storing :term:`classes_`.

        Parameters
        ----------
        y : iterable of iterables
            A set of labels (any orderable and hashable object) for each
            sample. If the `classes` parameter is set, `y` will not be
            iterated.

        Returns
        -------
        self : object
            Fitted estimator.
        """
        self._cached_dict = None

        if self.classes is None:
            classes = sorted(set(itertools.chain.from_iterable(y)))
        elif len(set(self.classes)) < len(self.classes):
            raise ValueError(
                "The classes argument contains duplicate "
                "classes. Remove these duplicates before passing "
                "them to MultiLabelBinarizer."
            )
        else:
            classes = self.classes
        dtype = int if all(isinstance(c, int) for c in classes) else object
        self.classes_ = np.empty(len(classes), dtype=dtype)
        self.classes_[:] = classes
        return self

    @_fit_context(prefer_skip_nested_validation=True)
    def fit_transform(self, y):
        """Fit the label sets binarizer and transform the given label sets.

        Parameters
        ----------
        y : iterable of iterables
            A set of labels (any orderable and hashable object) for each
            sample. If the `classes` parameter is set, `y` will not be
            iterated.

        Returns
        -------
        y_indicator : {ndarray, sparse matrix} of shape (n_samples, n_classes)
            A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]`
            is in `y[i]`, and 0 otherwise. Sparse matrix will be of CSR
            format.
        """
        if self.classes is not None:
            return self.fit(y).transform(y)

        self._cached_dict = None

        # Automatically increment on new class
        class_mapping = defaultdict(int)
        class_mapping.default_factory = class_mapping.__len__
        yt = self._transform(y, class_mapping)

        # sort classes and reorder columns
        tmp = sorted(class_mapping, key=class_mapping.get)

        # (make safe for tuples)
        dtype = int if all(isinstance(c, int) for c in tmp) else object
        class_mapping = np.empty(len(tmp), dtype=dtype)
        class_mapping[:] = tmp
        self.classes_, inverse = np.unique(class_mapping, return_inverse=True)
        # ensure yt.indices keeps its current dtype
        yt.indices = np.asarray(inverse[yt.indices], dtype=yt.indices.dtype)

        if not self.sparse_output:
            yt = yt.toarray()

        return yt

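
# ---------------------------------------------------------------------------
# A small sketch (illustrative, not part of the original module) of the
# auto-incrementing mapping used in `fit_transform` above: the default
# factory is the dict's own `__len__`, so each unseen label receives the
# next free column index.
#
#   >>> from collections import defaultdict
#   >>> mapping = defaultdict(int)
#   >>> mapping.default_factory = mapping.__len__
#   >>> [mapping[label] for label in ["b", "a", "b", "c"]]
#   [0, 1, 0, 2]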
    def transform(self, y):
        """Transform the given label sets.

        Parameters
        ----------
        y : iterable of iterables
            A set of labels (any orderable and hashable object) for each
            sample. If the `classes` parameter is set, `y` will not be
            iterated.

        Returns
        -------
        y_indicator : array or CSR matrix, shape (n_samples, n_classes)
            A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in
            `y[i]`, and 0 otherwise.
        """
        check_is_fitted(self)

        class_to_index = self._build_cache()
        yt = self._transform(y, class_to_index)

        if not self.sparse_output:
            yt = yt.toarray()

        return yt

    def _build_cache(self):
        if self._cached_dict is None:
            self._cached_dict = dict(zip(self.classes_, range(len(self.classes_))))

        return self._cached_dict

    def _transform(self, y, class_mapping):
        """Transforms the label sets with a given mapping.

        Parameters
        ----------
        y : iterable of iterables
            A set of labels (any orderable and hashable object) for each
            sample. If the `classes` parameter is set, `y` will not be
            iterated.

        class_mapping : Mapping
            Maps from label to column index in label indicator matrix.

        Returns
        -------
        y_indicator : sparse matrix of shape (n_samples, n_classes)
            Label indicator matrix. Will be of CSR format.
        """
        indices = array.array("i")
        indptr = array.array("i", [0])
        unknown = set()
        for labels in y:
            index = set()
            for label in labels:
                try:
                    index.add(class_mapping[label])
                except KeyError:
                    unknown.add(label)
            indices.extend(index)
            indptr.append(len(indices))
        if unknown:
            warnings.warn(
                "unknown class(es) {0} will be ignored".format(sorted(unknown, key=str))
            )
        data = np.ones(len(indices), dtype=int)

        return sp.csr_matrix(
            (data, indices, indptr), shape=(len(indptr) - 1, len(class_mapping))
        )

    def inverse_transform(self, yt):
        """Transform the given indicator matrix into label sets.

        Parameters
        ----------
        yt : {ndarray, sparse matrix} of shape (n_samples, n_classes)
            A matrix containing only 1s and 0s.

        Returns
        -------
        y : list of tuples
            The set of labels for each sample such that `y[i]` consists of
            `classes_[j]` for each `yt[i, j] == 1`.
        """
        check_is_fitted(self)

        if yt.shape[1] != len(self.classes_):
            raise ValueError(
                "Expected indicator for {0} classes, but got {1}".format(
                    len(self.classes_), yt.shape[1]
                )
            )

        if sp.issparse(yt):
            yt = yt.tocsr()
            if len(yt.data) != 0 and len(np.setdiff1d(yt.data, [0, 1])) > 0:
                raise ValueError("Expected only 0s and 1s in label indicator.")
            return [
                tuple(self.classes_.take(yt.indices[start:end]))
                for start, end in zip(yt.indptr[:-1], yt.indptr[1:])
            ]
        else:
            unexpected = np.setdiff1d(yt, [0, 1])
            if len(unexpected) > 0:
                raise ValueError(
                    "Expected only 0s and 1s in label indicator. Also got {0}".format(
                        unexpected
                    )
                )
            return [tuple(self.classes_.compress(indicators)) for indicators in yt]

    def _more_tags(self):
        return {"X_types": ["2dlabels"]}
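
# ---------------------------------------------------------------------------
# A minimal round-trip sketch (illustrative, not part of the original module)
# for `MultiLabelBinarizer.inverse_transform`:
#
#   >>> from sklearn.preprocessing import MultiLabelBinarizer
#   >>> mlb = MultiLabelBinarizer()
#   >>> yt = mlb.fit_transform([{"a", "b"}, {"c"}])
#   >>> mlb.inverse_transform(yt)
#   [('a', 'b'), ('c',)]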
env-llmeval/lib/python3.10/site-packages/sklearn/preprocessing/_polynomial.py
ADDED
@@ -0,0 +1,1172 @@
"""
This file contains preprocessing tools based on polynomials.
"""
import collections
from itertools import chain, combinations
from itertools import combinations_with_replacement as combinations_w_r
from numbers import Integral

import numpy as np
from scipy import sparse
from scipy.interpolate import BSpline
from scipy.special import comb

from ..base import BaseEstimator, TransformerMixin, _fit_context
from ..utils import check_array
from ..utils._param_validation import Interval, StrOptions
from ..utils.fixes import parse_version, sp_version
from ..utils.stats import _weighted_percentile
from ..utils.validation import (
    FLOAT_DTYPES,
    _check_feature_names_in,
    _check_sample_weight,
    check_is_fitted,
)
from ._csr_polynomial_expansion import (
    _calc_expanded_nnz,
    _calc_total_nnz,
    _csr_polynomial_expansion,
)

__all__ = [
    "PolynomialFeatures",
    "SplineTransformer",
]


def _create_expansion(X, interaction_only, deg, n_features, cumulative_size=0):
    """Helper function for creating and appending sparse expansion matrices."""

    total_nnz = _calc_total_nnz(X.indptr, interaction_only, deg)
    expanded_col = _calc_expanded_nnz(n_features, interaction_only, deg)

    if expanded_col == 0:
        return None
    # This only checks whether each block needs 64bit integers upon
    # expansion. We prefer to keep int32 indexing where we can,
    # since currently SciPy's CSR construction downcasts when possible,
    # so we prefer to avoid an unnecessary cast. The dtype may still
    # change in the concatenation process if needed.
    # See: https://github.com/scipy/scipy/issues/16569
    max_indices = expanded_col - 1
    max_indptr = total_nnz
    max_int32 = np.iinfo(np.int32).max
    needs_int64 = max(max_indices, max_indptr) > max_int32
    index_dtype = np.int64 if needs_int64 else np.int32

    # This is a pretty specific bug that is hard to work around by a user,
    # hence we do not detail the entire bug and all possible avoidance
    # mechanisms. Instead we recommend upgrading scipy or shrinking their data.
    cumulative_size += expanded_col
    if (
        sp_version < parse_version("1.8.0")
        and cumulative_size - 1 > max_int32
        and not needs_int64
    ):
        raise ValueError(
            "In scipy versions `<1.8.0`, the function `scipy.sparse.hstack`"
            " sometimes produces negative columns when the output shape contains"
            " `n_cols` too large to be represented by a 32bit signed"
            " integer. To avoid this error, either use a version"
            " of scipy `>=1.8.0` or alter the `PolynomialFeatures`"
            " transformer to produce fewer than 2^31 output features."
        )

    # Result of the expansion, modified in place by the
    # `_csr_polynomial_expansion` routine.
    expanded_data = np.empty(shape=total_nnz, dtype=X.data.dtype)
    expanded_indices = np.empty(shape=total_nnz, dtype=index_dtype)
    expanded_indptr = np.empty(shape=X.indptr.shape[0], dtype=index_dtype)
    _csr_polynomial_expansion(
        X.data,
        X.indices,
        X.indptr,
        X.shape[1],
        expanded_data,
        expanded_indices,
        expanded_indptr,
        interaction_only,
        deg,
    )
    return sparse.csr_matrix(
        (expanded_data, expanded_indices, expanded_indptr),
        shape=(X.indptr.shape[0] - 1, expanded_col),
        dtype=X.dtype,
    )

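
# ---------------------------------------------------------------------------
# A small sketch (illustrative, not part of the original module) of the
# index-dtype decision in `_create_expansion`: 64-bit indices are needed only
# when the largest column index or the total nnz overflows a signed int32.
#
#   >>> import numpy as np
#   >>> max_int32 = np.iinfo(np.int32).max
#   >>> def index_dtype(max_indices, max_indptr):
#   ...     return np.int64 if max(max_indices, max_indptr) > max_int32 else np.int32
#   >>> index_dtype(10, 10) is np.int32
#   True
#   >>> index_dtype(max_int32 + 1, 10) is np.int64
#   True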
class PolynomialFeatures(TransformerMixin, BaseEstimator):
    """Generate polynomial and interaction features.

    Generate a new feature matrix consisting of all polynomial combinations
    of the features with degree less than or equal to the specified degree.
    For example, if an input sample is two dimensional and of the form
    [a, b], the degree-2 polynomial features are [1, a, b, a^2, ab, b^2].

    Read more in the :ref:`User Guide <polynomial_features>`.

    Parameters
    ----------
    degree : int or tuple (min_degree, max_degree), default=2
        If a single int is given, it specifies the maximal degree of the
        polynomial features. If a tuple `(min_degree, max_degree)` is passed,
        then `min_degree` is the minimum and `max_degree` is the maximum
        polynomial degree of the generated features. Note that `min_degree=0`
        and `min_degree=1` are equivalent as outputting the degree zero term is
        determined by `include_bias`.

    interaction_only : bool, default=False
        If `True`, only interaction features are produced: features that are
        products of at most `degree` *distinct* input features, i.e. terms with
        power of 2 or higher of the same input feature are excluded:

        - included: `x[0]`, `x[1]`, `x[0] * x[1]`, etc.
        - excluded: `x[0] ** 2`, `x[0] ** 2 * x[1]`, etc.

    include_bias : bool, default=True
        If `True` (default), then include a bias column, the feature in which
        all polynomial powers are zero (i.e. a column of ones - acts as an
        intercept term in a linear model).

    order : {'C', 'F'}, default='C'
        Order of output array in the dense case. `'F'` order is faster to
        compute, but may slow down subsequent estimators.

        .. versionadded:: 0.21

    Attributes
    ----------
    powers_ : ndarray of shape (`n_output_features_`, `n_features_in_`)
        `powers_[i, j]` is the exponent of the jth input in the ith output.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    n_output_features_ : int
        The total number of polynomial output features. The number of output
        features is computed by iterating over all suitably sized combinations
        of input features.

    See Also
    --------
    SplineTransformer : Transformer that generates univariate B-spline bases
        for features.

    Notes
    -----
    Be aware that the number of features in the output array scales
    polynomially in the number of features of the input array, and
    exponentially in the degree. High degrees can cause overfitting.

    See :ref:`examples/linear_model/plot_polynomial_interpolation.py
    <sphx_glr_auto_examples_linear_model_plot_polynomial_interpolation.py>`

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.preprocessing import PolynomialFeatures
    >>> X = np.arange(6).reshape(3, 2)
    >>> X
    array([[0, 1],
           [2, 3],
           [4, 5]])
    >>> poly = PolynomialFeatures(2)
    >>> poly.fit_transform(X)
    array([[ 1.,  0.,  1.,  0.,  0.,  1.],
           [ 1.,  2.,  3.,  4.,  6.,  9.],
           [ 1.,  4.,  5., 16., 20., 25.]])
    >>> poly = PolynomialFeatures(interaction_only=True)
    >>> poly.fit_transform(X)
    array([[ 1.,  0.,  1.,  0.],
           [ 1.,  2.,  3.,  6.],
           [ 1.,  4.,  5., 20.]])
    """

    _parameter_constraints: dict = {
        "degree": [Interval(Integral, 0, None, closed="left"), "array-like"],
        "interaction_only": ["boolean"],
        "include_bias": ["boolean"],
        "order": [StrOptions({"C", "F"})],
    }

    def __init__(
        self, degree=2, *, interaction_only=False, include_bias=True, order="C"
    ):
        self.degree = degree
        self.interaction_only = interaction_only
        self.include_bias = include_bias
        self.order = order

    @staticmethod
    def _combinations(
        n_features, min_degree, max_degree, interaction_only, include_bias
    ):
        comb = combinations if interaction_only else combinations_w_r
        start = max(1, min_degree)
        iter = chain.from_iterable(
            comb(range(n_features), i) for i in range(start, max_degree + 1)
        )
        if include_bias:
            iter = chain(comb(range(n_features), 0), iter)
        return iter

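    # Illustrative sketch (not part of the class): for n_features=2,
    # min_degree=0, max_degree=2, include_bias=True and
    # interaction_only=False, the combinations above enumerate the index
    # tuples of each monomial:
    #
    # >>> from itertools import chain, combinations_with_replacement
    # >>> list(chain.from_iterable(
    # ...     combinations_with_replacement(range(2), i) for i in range(0, 3)
    # ... ))
    # [(), (0,), (1,), (0, 0), (0, 1), (1, 1)]
    #
    # which corresponds to the features [1, x0, x1, x0^2, x0*x1, x1^2].
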
    @staticmethod
    def _num_combinations(
        n_features, min_degree, max_degree, interaction_only, include_bias
    ):
        """Calculate number of terms in polynomial expansion

        This should be equivalent to counting the number of terms returned by
        _combinations(...) but much faster.
        """

        if interaction_only:
            combinations = sum(
                [
                    comb(n_features, i, exact=True)
                    for i in range(max(1, min_degree), min(max_degree, n_features) + 1)
                ]
            )
        else:
            combinations = comb(n_features + max_degree, max_degree, exact=True) - 1
            if min_degree > 0:
                d = min_degree - 1
                combinations -= comb(n_features + d, d, exact=True) - 1

        if include_bias:
            combinations += 1

        return combinations

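    # A quick sanity check of the closed form above (illustrative only):
    # with n_features=3, max_degree=2, min_degree=0 and interaction_only
    # disabled, the count of non-bias terms is C(3 + 2, 2) - 1 = 9 (three
    # linear plus six quadratic terms), and include_bias=True adds one more,
    # giving 10 in total.
    #
    # >>> from scipy.special import comb
    # >>> comb(3 + 2, 2, exact=True) - 1 + 1
    # 10
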
    @property
    def powers_(self):
        """Exponent for each of the inputs in the output."""
        check_is_fitted(self)

        combinations = self._combinations(
            n_features=self.n_features_in_,
            min_degree=self._min_degree,
            max_degree=self._max_degree,
            interaction_only=self.interaction_only,
            include_bias=self.include_bias,
        )
        return np.vstack(
            [np.bincount(c, minlength=self.n_features_in_) for c in combinations]
        )

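    # Illustrative sketch: each combination tuple is turned into a row of
    # exponents via bincount. For the degree-2 expansion of two features,
    # the tuple (0, 1) (the x0*x1 term) maps to the exponent row [1, 1]:
    #
    # >>> import numpy as np
    # >>> np.bincount((0, 1), minlength=2)
    # array([1, 1])
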
    def get_feature_names_out(self, input_features=None):
        """Get output feature names for transformation.

        Parameters
        ----------
        input_features : array-like of str or None, default=None
            Input features.

            - If `input_features is None`, then `feature_names_in_` is
              used as feature names in. If `feature_names_in_` is not defined,
              then the following input feature names are generated:
              `["x0", "x1", ..., "x(n_features_in_ - 1)"]`.
            - If `input_features` is an array-like, then `input_features` must
              match `feature_names_in_` if `feature_names_in_` is defined.

        Returns
        -------
        feature_names_out : ndarray of str objects
            Transformed feature names.
        """
        powers = self.powers_
        input_features = _check_feature_names_in(self, input_features)
        feature_names = []
        for row in powers:
            inds = np.where(row)[0]
            if len(inds):
                name = " ".join(
                    (
                        "%s^%d" % (input_features[ind], exp)
                        if exp != 1
                        else input_features[ind]
                    )
                    for ind, exp in zip(inds, row[inds])
                )
            else:
                name = "1"
            feature_names.append(name)
        return np.asarray(feature_names, dtype=object)

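    # Illustrative usage (assuming default generated feature names), showing
    # how the exponent rows above become human-readable names:
    #
    # >>> import numpy as np
    # >>> from sklearn.preprocessing import PolynomialFeatures
    # >>> poly = PolynomialFeatures(degree=2).fit(np.arange(6).reshape(3, 2))
    # >>> poly.get_feature_names_out()
    # array(['1', 'x0', 'x1', 'x0^2', 'x0 x1', 'x1^2'], dtype=object)
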
    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, X, y=None):
        """
        Compute number of output features.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The data.

        y : Ignored
            Not used, present here for API consistency by convention.

        Returns
        -------
        self : object
            Fitted transformer.
        """
        _, n_features = self._validate_data(X, accept_sparse=True).shape

        if isinstance(self.degree, Integral):
            if self.degree == 0 and not self.include_bias:
                raise ValueError(
                    "Setting degree to zero and include_bias to False would result in"
                    " an empty output array."
                )

            self._min_degree = 0
            self._max_degree = self.degree
        elif (
            isinstance(self.degree, collections.abc.Iterable) and len(self.degree) == 2
        ):
            self._min_degree, self._max_degree = self.degree
            if not (
                isinstance(self._min_degree, Integral)
                and isinstance(self._max_degree, Integral)
                and self._min_degree >= 0
                and self._min_degree <= self._max_degree
            ):
                raise ValueError(
                    "degree=(min_degree, max_degree) must "
                    "be non-negative integers that fulfil "
                    "min_degree <= max_degree, got "
                    f"{self.degree}."
                )
            elif self._max_degree == 0 and not self.include_bias:
                raise ValueError(
                    "Setting both min_degree and max_degree to zero and include_bias to"
                    " False would result in an empty output array."
                )
        else:
            raise ValueError(
                "degree must be a non-negative int or tuple "
                "(min_degree, max_degree), got "
                f"{self.degree}."
            )

        self.n_output_features_ = self._num_combinations(
            n_features=n_features,
            min_degree=self._min_degree,
            max_degree=self._max_degree,
            interaction_only=self.interaction_only,
            include_bias=self.include_bias,
        )
        if self.n_output_features_ > np.iinfo(np.intp).max:
            msg = (
                "The output that would result from the current configuration would"
                f" have {self.n_output_features_} features which is too large to be"
                f" indexed by {np.intp().dtype.name}. Please change some or all of the"
                " following:\n- The number of features in the input, currently"
                f" {n_features=}\n- The range of degrees to calculate, currently"
                f" [{self._min_degree}, {self._max_degree}]\n- Whether to include only"
                f" interaction terms, currently {self.interaction_only}\n- Whether to"
                f" include a bias term, currently {self.include_bias}."
            )
            if (
                np.intp == np.int32
                and self.n_output_features_ <= np.iinfo(np.int64).max
            ):  # pragma: nocover
                msg += (
                    "\nNote that the current Python runtime has a limited 32 bit "
                    "address space and that this configuration would have been "
                    "admissible if run on a 64 bit Python runtime."
                )
            raise ValueError(msg)
        # We also record the number of output features for
        # _max_degree = 0
        self._n_out_full = self._num_combinations(
            n_features=n_features,
            min_degree=0,
            max_degree=self._max_degree,
            interaction_only=self.interaction_only,
            include_bias=self.include_bias,
        )

        return self

    def transform(self, X):
        """Transform data to polynomial features.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The data to transform, row by row.

            Prefer CSR over CSC for sparse input (for speed), but CSC is
            required if the degree is 4 or higher. If the degree is less than
            4 and the input format is CSC, it will be converted to CSR, have
            its polynomial features generated, then converted back to CSC.

            If the degree is 2 or 3, the method described in "Leveraging
            Sparsity to Speed Up Polynomial Feature Expansions of CSR Matrices
            Using K-Simplex Numbers" by Andrew Nystrom and John Hughes is
            used, which is much faster than the method used on CSC input. For
            this reason, a CSC input will be converted to CSR, and the output
            will be converted back to CSC prior to being returned, hence the
            preference of CSR.

        Returns
        -------
        XP : {ndarray, sparse matrix} of shape (n_samples, NP)
            The matrix of features, where `NP` is the number of polynomial
            features generated from the combination of inputs. If a sparse
            matrix is provided, it will be converted into a sparse
            `csr_matrix`.
        """
        check_is_fitted(self)

        X = self._validate_data(
            X, order="F", dtype=FLOAT_DTYPES, reset=False, accept_sparse=("csr", "csc")
        )

        n_samples, n_features = X.shape
        max_int32 = np.iinfo(np.int32).max
        if sparse.issparse(X) and X.format == "csr":
            if self._max_degree > 3:
                return self.transform(X.tocsc()).tocsr()
            to_stack = []
            if self.include_bias:
                to_stack.append(
                    sparse.csr_matrix(np.ones(shape=(n_samples, 1), dtype=X.dtype))
                )
            if self._min_degree <= 1 and self._max_degree > 0:
                to_stack.append(X)

            cumulative_size = sum(mat.shape[1] for mat in to_stack)
            for deg in range(max(2, self._min_degree), self._max_degree + 1):
                expanded = _create_expansion(
                    X=X,
                    interaction_only=self.interaction_only,
                    deg=deg,
                    n_features=n_features,
                    cumulative_size=cumulative_size,
                )
                if expanded is not None:
                    to_stack.append(expanded)
                    cumulative_size += expanded.shape[1]
            if len(to_stack) == 0:
                # edge case: deal with empty matrix
                XP = sparse.csr_matrix((n_samples, 0), dtype=X.dtype)
            else:
                # `scipy.sparse.hstack` breaks in scipy<1.9.2
                # when `n_output_features_ > max_int32`
                all_int32 = all(mat.indices.dtype == np.int32 for mat in to_stack)
                if (
                    sp_version < parse_version("1.9.2")
                    and self.n_output_features_ > max_int32
                    and all_int32
                ):
                    raise ValueError(  # pragma: no cover
                        "In scipy versions `<1.9.2`, the function `scipy.sparse.hstack`"
                        " produces negative columns when:\n1. The output shape contains"
                        " `n_cols` too large to be represented by a 32bit signed"
                        " integer.\n2. All sub-matrices to be stacked have indices of"
                        " dtype `np.int32`.\nTo avoid this error, either use a version"
                        " of scipy `>=1.9.2` or alter the `PolynomialFeatures`"
                        " transformer to produce fewer than 2^31 output features"
                    )
                XP = sparse.hstack(to_stack, dtype=X.dtype, format="csr")
        elif sparse.issparse(X) and X.format == "csc" and self._max_degree < 4:
            return self.transform(X.tocsr()).tocsc()
        elif sparse.issparse(X):
            combinations = self._combinations(
                n_features=n_features,
                min_degree=self._min_degree,
                max_degree=self._max_degree,
                interaction_only=self.interaction_only,
                include_bias=self.include_bias,
            )
            columns = []
            for combi in combinations:
                if combi:
                    out_col = 1
                    for col_idx in combi:
                        out_col = X[:, [col_idx]].multiply(out_col)
                    columns.append(out_col)
                else:
                    bias = sparse.csc_matrix(np.ones((X.shape[0], 1)))
                    columns.append(bias)
            XP = sparse.hstack(columns, dtype=X.dtype).tocsc()
        else:
            # Do as if _min_degree = 0 and cut down array after the
            # computation, i.e. use _n_out_full instead of n_output_features_.
            XP = np.empty(
                shape=(n_samples, self._n_out_full), dtype=X.dtype, order=self.order
            )

            # What follows is a faster implementation of:
            # for i, comb in enumerate(combinations):
            #     XP[:, i] = X[:, comb].prod(1)
            # This implementation uses two optimisations.
            # First one is broadcasting,
            # multiply ([X1, ..., Xn], X1) -> [X1 X1, ..., Xn X1]
            # multiply ([X2, ..., Xn], X2) -> [X2 X2, ..., Xn X2]
            # ...
            # multiply ([X[:, start:end], X[:, start]) -> ...
            # Second optimisation happens for degrees >= 3.
            # Xi^3 is computed reusing previous computation:
            # Xi^3 = Xi^2 * Xi.

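            # A minimal sketch of the broadcasting step above (illustrative,
            # with assumed toy values): for X = [x0, x1] and degree 2, the
            # degree-1 block XP[:, 1:3] is multiplied by column x0 to fill
            # [x0*x0, x1*x0], then the remaining slice [x1] is multiplied by
            # x1 to fill [x1*x1], producing the degree-2 block without
            # recomputing any lower-degree product.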
            # degree 0 term
            if self.include_bias:
                XP[:, 0] = 1
                current_col = 1
            else:
                current_col = 0

            if self._max_degree == 0:
                return XP

            # degree 1 term
            XP[:, current_col : current_col + n_features] = X
            index = list(range(current_col, current_col + n_features))
            current_col += n_features
            index.append(current_col)

            # loop over degree >= 2 terms
            for _ in range(2, self._max_degree + 1):
                new_index = []
                end = index[-1]
                for feature_idx in range(n_features):
                    start = index[feature_idx]
                    new_index.append(current_col)
                    if self.interaction_only:
                        start += index[feature_idx + 1] - index[feature_idx]
                    next_col = current_col + end - start
                    if next_col <= current_col:
                        break
                    # XP[:, start:end] are terms of degree d - 1
                    # that exclude feature #feature_idx.
                    np.multiply(
                        XP[:, start:end],
                        X[:, feature_idx : feature_idx + 1],
                        out=XP[:, current_col:next_col],
                        casting="no",
                    )
                    current_col = next_col

                new_index.append(current_col)
                index = new_index

            if self._min_degree > 1:
                n_XP, n_Xout = self._n_out_full, self.n_output_features_
                if self.include_bias:
                    Xout = np.empty(
                        shape=(n_samples, n_Xout), dtype=XP.dtype, order=self.order
                    )
                    Xout[:, 0] = 1
                    Xout[:, 1:] = XP[:, n_XP - n_Xout + 1 :]
                else:
                    Xout = XP[:, n_XP - n_Xout :].copy()
                XP = Xout
        return XP


class SplineTransformer(TransformerMixin, BaseEstimator):
    """Generate univariate B-spline bases for features.

    Generate a new feature matrix consisting of
    `n_splines=n_knots + degree - 1` (`n_knots - 1` for
    `extrapolation="periodic"`) spline basis functions
    (B-splines) of polynomial order=`degree` for each feature.

    To learn more about the SplineTransformer class, see:
    :ref:`sphx_glr_auto_examples_applications_plot_cyclical_feature_engineering.py`

    Read more in the :ref:`User Guide <spline_transformer>`.

    .. versionadded:: 1.0

    Parameters
    ----------
    n_knots : int, default=5
        Number of knots of the splines if `knots` equals one of
        {'uniform', 'quantile'}. Must be larger or equal to 2. Ignored if
        `knots` is array-like.

    degree : int, default=3
        The polynomial degree of the spline basis. Must be a non-negative
        integer.

    knots : {'uniform', 'quantile'} or array-like of shape \
        (n_knots, n_features), default='uniform'
        Set knot positions such that first knot <= features <= last knot.

        - If 'uniform', `n_knots` number of knots are distributed uniformly
          from min to max values of the features.
        - If 'quantile', they are distributed uniformly along the quantiles of
          the features.
        - If an array-like is given, it directly specifies the sorted knot
          positions including the boundary knots. Note that, internally,
          `degree` number of knots are added before the first knot, the same
          after the last knot.

    extrapolation : {'error', 'constant', 'linear', 'continue', 'periodic'}, \
        default='constant'
        If 'error', values outside the min and max values of the training
        features raise a `ValueError`. If 'constant', the value of the
        splines at minimum and maximum value of the features is used as
        constant extrapolation. If 'linear', a linear extrapolation is used.
        If 'continue', the splines are extrapolated as is, i.e. option
        `extrapolate=True` in :class:`scipy.interpolate.BSpline`. If
        'periodic', periodic splines with a periodicity equal to the distance
        between the first and last knot are used. Periodic splines enforce
        equal function values and derivatives at the first and last knot.
        For example, this makes it possible to avoid introducing an arbitrary
        jump between Dec 31st and Jan 1st in spline features derived from a
        naturally periodic "day-of-year" input feature. In this case it is
        recommended to manually set the knot values to control the period.

    include_bias : bool, default=True
        If False, then the last spline element inside the data range
        of a feature is dropped. As B-splines sum to one over the spline basis
        functions for each data point, they implicitly include a bias term,
        i.e. a column of ones. It acts as an intercept term in a linear model.

    order : {'C', 'F'}, default='C'
        Order of output array in the dense case. `'F'` order is faster to compute, but
        may slow down subsequent estimators.

    sparse_output : bool, default=False
        Will return sparse CSR matrix if set True else will return an array. This
        option is only available with `scipy>=1.8`.

        .. versionadded:: 1.2

    Attributes
    ----------
    bsplines_ : list of shape (n_features,)
        List of BSplines objects, one for each feature.

    n_features_in_ : int
        The total number of input features.

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    n_features_out_ : int
        The total number of output features, which is computed as
        `n_features * n_splines`, where `n_splines` is
        the number of basis elements of the B-splines,
        `n_knots + degree - 1` for non-periodic splines and
        `n_knots - 1` for periodic ones.
        If `include_bias=False`, then it is only
        `n_features * (n_splines - 1)`.

    See Also
    --------
    KBinsDiscretizer : Transformer that bins continuous data into intervals.

    PolynomialFeatures : Transformer that generates polynomial and interaction
        features.

    Notes
    -----
    High degrees and a high number of knots can cause overfitting.

    See :ref:`examples/linear_model/plot_polynomial_interpolation.py
    <sphx_glr_auto_examples_linear_model_plot_polynomial_interpolation.py>`.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.preprocessing import SplineTransformer
    >>> X = np.arange(6).reshape(6, 1)
    >>> spline = SplineTransformer(degree=2, n_knots=3)
    >>> spline.fit_transform(X)
    array([[0.5 , 0.5 , 0.  , 0.  ],
           [0.18, 0.74, 0.08, 0.  ],
           [0.02, 0.66, 0.32, 0.  ],
           [0.  , 0.32, 0.66, 0.02],
           [0.  , 0.08, 0.74, 0.18],
           [0.  , 0.  , 0.5 , 0.5 ]])
    """

    _parameter_constraints: dict = {
        "n_knots": [Interval(Integral, 2, None, closed="left")],
        "degree": [Interval(Integral, 0, None, closed="left")],
        "knots": [StrOptions({"uniform", "quantile"}), "array-like"],
        "extrapolation": [
            StrOptions({"error", "constant", "linear", "continue", "periodic"})
        ],
        "include_bias": ["boolean"],
        "order": [StrOptions({"C", "F"})],
        "sparse_output": ["boolean"],
    }

    def __init__(
        self,
        n_knots=5,
        degree=3,
        *,
        knots="uniform",
        extrapolation="constant",
        include_bias=True,
        order="C",
        sparse_output=False,
    ):
        self.n_knots = n_knots
        self.degree = degree
        self.knots = knots
        self.extrapolation = extrapolation
        self.include_bias = include_bias
        self.order = order
        self.sparse_output = sparse_output

    @staticmethod
    def _get_base_knot_positions(X, n_knots=10, knots="uniform", sample_weight=None):
        """Calculate base knot positions.

        Base knots such that first knot <= feature <= last knot. For the
        B-spline construction with scipy.interpolate.BSpline, 2*degree knots
        beyond the base interval are added.

        Returns
        -------
        knots : ndarray of shape (n_knots, n_features), dtype=np.float64
            Knot positions (points) of base interval.
        """
        if knots == "quantile":
            percentiles = 100 * np.linspace(
                start=0, stop=1, num=n_knots, dtype=np.float64
            )

            if sample_weight is None:
                knots = np.percentile(X, percentiles, axis=0)
            else:
                knots = np.array(
                    [
                        _weighted_percentile(X, sample_weight, percentile)
                        for percentile in percentiles
                    ]
                )

        else:
            # knots == 'uniform':
            # Note that the variable `knots` has already been validated and
            # `else` is therefore safe.
            # Disregard observations with zero weight.
            mask = slice(None, None, 1) if sample_weight is None else sample_weight > 0
            x_min = np.amin(X[mask], axis=0)
            x_max = np.amax(X[mask], axis=0)

            knots = np.linspace(
                start=x_min,
                stop=x_max,
                num=n_knots,
                endpoint=True,
                dtype=np.float64,
            )

        return knots

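    # Illustrative sketch of the 'uniform' strategy (assumed toy input): for
    # a single feature spanning [0, 4] and n_knots=3, the base knots are the
    # evenly spaced points 0, 2 and 4.
    #
    # >>> import numpy as np
    # >>> np.linspace(start=0.0, stop=4.0, num=3, endpoint=True)
    # array([0., 2., 4.])
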
    def get_feature_names_out(self, input_features=None):
        """Get output feature names for transformation.

        Parameters
        ----------
        input_features : array-like of str or None, default=None
            Input features.

            - If `input_features` is `None`, then `feature_names_in_` is
              used as feature names in. If `feature_names_in_` is not defined,
              then the following input feature names are generated:
              `["x0", "x1", ..., "x(n_features_in_ - 1)"]`.
            - If `input_features` is an array-like, then `input_features` must
              match `feature_names_in_` if `feature_names_in_` is defined.

        Returns
        -------
        feature_names_out : ndarray of str objects
            Transformed feature names.
        """
        check_is_fitted(self, "n_features_in_")
        n_splines = self.bsplines_[0].c.shape[1]

        input_features = _check_feature_names_in(self, input_features)
        feature_names = []
        for i in range(self.n_features_in_):
            for j in range(n_splines - 1 + self.include_bias):
                feature_names.append(f"{input_features[i]}_sp_{j}")
        return np.asarray(feature_names, dtype=object)

    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, X, y=None, sample_weight=None):
        """Compute knot positions of splines.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            The data.

        y : None
            Ignored.

        sample_weight : array-like of shape (n_samples,), default = None
            Individual weights for each sample. Used to calculate quantiles if
            `knots="quantile"`. For `knots="uniform"`, zero weighted
            observations are ignored for finding the min and max of `X`.

        Returns
        -------
        self : object
            Fitted transformer.
        """
        X = self._validate_data(
            X,
            reset=True,
            accept_sparse=False,
            ensure_min_samples=2,
            ensure_2d=True,
        )
        if sample_weight is not None:
            sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)

        _, n_features = X.shape

        if isinstance(self.knots, str):
            base_knots = self._get_base_knot_positions(
                X, n_knots=self.n_knots, knots=self.knots, sample_weight=sample_weight
            )
        else:
            base_knots = check_array(self.knots, dtype=np.float64)
            if base_knots.shape[0] < 2:
                raise ValueError("Number of knots, knots.shape[0], must be >= 2.")
            elif base_knots.shape[1] != n_features:
                raise ValueError("knots.shape[1] == n_features is violated.")
            elif not np.all(np.diff(base_knots, axis=0) > 0):
                raise ValueError("knots must be sorted without duplicates.")

        if self.sparse_output and sp_version < parse_version("1.8.0"):
            raise ValueError(
                "Option sparse_output=True is only available with scipy>=1.8.0, "
                f"but here scipy=={sp_version} is used."
            )

        # number of knots for base interval
        n_knots = base_knots.shape[0]

        if self.extrapolation == "periodic" and n_knots <= self.degree:
            raise ValueError(
                "Periodic splines require degree < n_knots. Got n_knots="
                f"{n_knots} and degree={self.degree}."
            )

        # number of spline basis functions
        if self.extrapolation != "periodic":
            n_splines = n_knots + self.degree - 1
        else:
            # periodic splines have self.degree less degrees of freedom
            n_splines = n_knots - 1

        degree = self.degree
        n_out = n_features * n_splines
        # We have to add degree number of knots below, and degree number of
        # knots above the base knots in order to make the spline basis
        # complete.
        if self.extrapolation == "periodic":
            # For periodic splines the spacing of the first / last degree knots
            # needs to be a continuation of the spacing of the last / first
            # base knots.
            period = base_knots[-1] - base_knots[0]
            knots = np.r_[
                base_knots[-(degree + 1) : -1] - period,
                base_knots,
                base_knots[1 : (degree + 1)] + period,
            ]

        else:
            # Eilers & Marx in "Flexible smoothing with B-splines and
            # penalties" https://doi.org/10.1214/ss/1038425655 advise
            # against repeating first and last knot several times, which
            # would have inferior behaviour at boundaries if combined with
            # a penalty (hence P-Spline). We follow this advice even if our
            # splines are unpenalized. Meaning we do not:
            # knots = np.r_[
            #     np.tile(base_knots.min(axis=0), reps=[degree, 1]),
            #     base_knots,
            #     np.tile(base_knots.max(axis=0), reps=[degree, 1])
            # ]
            # Instead, we reuse the distance of the 2 first/last knots.
            dist_min = base_knots[1] - base_knots[0]
            dist_max = base_knots[-1] - base_knots[-2]

            knots = np.r_[
                np.linspace(
                    base_knots[0] - degree * dist_min,
                    base_knots[0] - dist_min,
                    num=degree,
                ),
                base_knots,
                np.linspace(
                    base_knots[-1] + dist_max,
                    base_knots[-1] + degree * dist_max,
                    num=degree,
                ),
            ]

        # With a diagonal coefficient matrix, we get back the spline basis
        # elements, i.e. the design matrix of the spline.
        # Note, BSpline appreciates C-contiguous float64 arrays as c=coef.
        coef = np.eye(n_splines, dtype=np.float64)
        if self.extrapolation == "periodic":
            coef = np.concatenate((coef, coef[:degree, :]))

        extrapolate = self.extrapolation in ["periodic", "continue"]

        bsplines = [
            BSpline.construct_fast(
                knots[:, i], coef, self.degree, extrapolate=extrapolate
            )
            for i in range(n_features)
        ]
        self.bsplines_ = bsplines

        self.n_features_out_ = n_out - n_features * (1 - self.include_bias)
        return self

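    # Illustrative sketch of the non-periodic knot padding above (assumed toy
    # values): with base knots [0, 1, 2] and degree=2, dist_min = dist_max = 1,
    # so two knots are prepended and two appended, giving the full knot vector
    # [-2, -1, 0, 1, 2, 3, 4].
    #
    # >>> import numpy as np
    # >>> base = np.array([0.0, 1.0, 2.0])
    # >>> np.r_[np.linspace(-2.0, -1.0, num=2), base, np.linspace(3.0, 4.0, num=2)]
    # array([-2., -1.,  0.,  1.,  2.,  3.,  4.])
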
    def transform(self, X):
        """Transform each feature data to B-splines.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            The data to transform.

        Returns
        -------
        XBS : {ndarray, sparse matrix} of shape (n_samples, n_features * n_splines)
            The matrix of features, where n_splines is the number of basis
            elements of the B-splines, n_knots + degree - 1.
        """
        check_is_fitted(self)

        X = self._validate_data(X, reset=False, accept_sparse=False, ensure_2d=True)

        n_samples, n_features = X.shape
        n_splines = self.bsplines_[0].c.shape[1]
        degree = self.degree

        # TODO: Remove this condition, once scipy 1.10 is the minimum version.
        # Only scipy >= 1.10 supports design_matrix(.., extrapolate=..).
        # The default (implicit in scipy < 1.10) is extrapolate=False.
        scipy_1_10 = sp_version >= parse_version("1.10.0")
        # Note: self.bsplines_[0].extrapolate is True for extrapolation in
        # ["periodic", "continue"]
        if scipy_1_10:
            use_sparse = self.sparse_output
            kwargs_extrapolate = {"extrapolate": self.bsplines_[0].extrapolate}
        else:
            use_sparse = self.sparse_output and not self.bsplines_[0].extrapolate
            kwargs_extrapolate = dict()

        # Note that scipy BSpline returns float64 arrays and converts input
        # x=X[:, i] to c-contiguous float64.
        n_out = self.n_features_out_ + n_features * (1 - self.include_bias)
        if X.dtype in FLOAT_DTYPES:
            dtype = X.dtype
        else:
            dtype = np.float64
        if use_sparse:
            output_list = []
        else:
            XBS = np.zeros((n_samples, n_out), dtype=dtype, order=self.order)

        for i in range(n_features):
            spl = self.bsplines_[i]

            if self.extrapolation in ("continue", "error", "periodic"):
                if self.extrapolation == "periodic":
                    # With periodic extrapolation we map x to the segment
                    # [spl.t[k], spl.t[n]].
                    # This is equivalent to BSpline(.., extrapolate="periodic")
                    # for scipy>=1.0.0.
                    n = spl.t.size - spl.k - 1
                    # Assign to new array to avoid inplace operation
                    x = spl.t[spl.k] + (X[:, i] - spl.t[spl.k]) % (
                        spl.t[n] - spl.t[spl.k]
                    )
                else:
                    x = X[:, i]

                if use_sparse:
                    XBS_sparse = BSpline.design_matrix(
                        x, spl.t, spl.k, **kwargs_extrapolate
                    )
                    if self.extrapolation == "periodic":
                        # See the construction of coef in fit. We need to add the last
                        # degree spline basis function to the first degree ones and
                        # then drop the last ones.
                        # Note: See comment about SparseEfficiencyWarning below.
                        XBS_sparse = XBS_sparse.tolil()
                        XBS_sparse[:, :degree] += XBS_sparse[:, -degree:]
                        XBS_sparse = XBS_sparse[:, :-degree]
                else:
                    XBS[:, (i * n_splines) : ((i + 1) * n_splines)] = spl(x)
            else:  # extrapolation in ("constant", "linear")
                xmin, xmax = spl.t[degree], spl.t[-degree - 1]
                # spline values at boundaries
                f_min, f_max = spl(xmin), spl(xmax)
                mask = (xmin <= X[:, i]) & (X[:, i] <= xmax)
                if use_sparse:
                    mask_inv = ~mask
                    x = X[:, i].copy()
                    # Set some arbitrary values outside boundary that will be reassigned
                    # later.
                    x[mask_inv] = spl.t[self.degree]
                    XBS_sparse = BSpline.design_matrix(x, spl.t, spl.k)
                    # Note: Without converting to lil_matrix we would get:
                    # scipy.sparse._base.SparseEfficiencyWarning: Changing the sparsity
                    # structure of a csr_matrix is expensive. lil_matrix is more
                    # efficient.
                    if np.any(mask_inv):
                        XBS_sparse = XBS_sparse.tolil()
                        XBS_sparse[mask_inv, :] = 0
                else:
                    XBS[mask, (i * n_splines) : ((i + 1) * n_splines)] = spl(X[mask, i])

            # Note for extrapolation:
            # 'continue' is already returned as is by scipy BSplines
            if self.extrapolation == "error":
                # BSpline with extrapolate=False does not raise an error, but
                # outputs np.nan.
                if (use_sparse and np.any(np.isnan(XBS_sparse.data))) or (
                    not use_sparse
                    and np.any(
                        np.isnan(XBS[:, (i * n_splines) : ((i + 1) * n_splines)])
                    )
                ):
                    raise ValueError(
                        "X contains values beyond the limits of the knots."
                    )
            elif self.extrapolation == "constant":
                # Set all values beyond xmin and xmax to the value of the
                # spline basis functions at those two positions.
                # Only the first degree and last degree number of splines
                # have non-zero values at the boundaries.

                mask = X[:, i] < xmin
                if np.any(mask):
                    if use_sparse:
                        # Note: See comment about SparseEfficiencyWarning above.
                        XBS_sparse = XBS_sparse.tolil()
                        XBS_sparse[mask, :degree] = f_min[:degree]

                    else:
                        XBS[mask, (i * n_splines) : (i * n_splines + degree)] = f_min[
                            :degree
                        ]

                mask = X[:, i] > xmax
                if np.any(mask):
                    if use_sparse:
                        # Note: See comment about SparseEfficiencyWarning above.
                        XBS_sparse = XBS_sparse.tolil()
                        XBS_sparse[mask, -degree:] = f_max[-degree:]
                    else:
                        XBS[
                            mask,
                            ((i + 1) * n_splines - degree) : ((i + 1) * n_splines),
                        ] = f_max[-degree:]

            elif self.extrapolation == "linear":
                # Continue the degree first and degree last spline bases
                # linearly beyond the boundaries, with slope = derivative at
                # the boundary.
                # Note that all others have derivative = value = 0 at the
                # boundaries.

                # spline derivatives = slopes at boundaries
                fp_min, fp_max = spl(xmin, nu=1), spl(xmax, nu=1)
                # Compute the linear continuation.
                if degree <= 1:
                    # For degree=1, the derivative of 2nd spline is not zero at
                    # boundary. For degree=0 it is the same as 'constant'.
                    degree += 1
                for j in range(degree):
                    mask = X[:, i] < xmin
                    if np.any(mask):
                        linear_extr = f_min[j] + (X[mask, i] - xmin) * fp_min[j]
                        if use_sparse:
                            # Note: See comment about SparseEfficiencyWarning above.
                            XBS_sparse = XBS_sparse.tolil()
                            XBS_sparse[mask, j] = linear_extr
                        else:
                            XBS[mask, i * n_splines + j] = linear_extr

                    mask = X[:, i] > xmax
                    if np.any(mask):
                        k = n_splines - 1 - j
                        linear_extr = f_max[k] + (X[mask, i] - xmax) * fp_max[k]
                        if use_sparse:
                            # Note: See comment about SparseEfficiencyWarning above.
                            XBS_sparse = XBS_sparse.tolil()
                            XBS_sparse[mask, k : k + 1] = linear_extr[:, None]
                        else:
                            XBS[mask, i * n_splines + k] = linear_extr

            if use_sparse:
                XBS_sparse = XBS_sparse.tocsr()
                output_list.append(XBS_sparse)

        if use_sparse:
            # TODO: Remove this conditional error when the minimum supported version of
            # SciPy is 1.9.2
            # `scipy.sparse.hstack` breaks in scipy<1.9.2
            # when `n_features_out_ > max_int32`
            max_int32 = np.iinfo(np.int32).max
            all_int32 = True
            for mat in output_list:
                all_int32 &= mat.indices.dtype == np.int32
            if (
                sp_version < parse_version("1.9.2")
                and self.n_features_out_ > max_int32
                and all_int32
            ):
                raise ValueError(
                    "In scipy versions `<1.9.2`, the function `scipy.sparse.hstack`"
                    " produces negative columns when:\n1. The output shape contains"
                    " `n_cols` too large to be represented by a 32bit signed"
                    " integer.\n2. All sub-matrices to be stacked have indices of"
                    " dtype `np.int32`.\nTo avoid this error, either use a version"
                    " of scipy `>=1.9.2` or alter the `SplineTransformer`"
                    " transformer to produce fewer than 2^31 output features"
                )
            XBS = sparse.hstack(output_list, format="csr")
        elif self.sparse_output:
            # TODO: Remove once scipy 1.10 is the minimum version. See comments above.
            XBS = sparse.csr_matrix(XBS)

        if self.include_bias:
            return XBS
        else:
            # We throw away one spline basis per feature.
            # We chose the last one.
            indices = [j for j in range(XBS.shape[1]) if (j + 1) % n_splines != 0]
            return XBS[:, indices]

    def _more_tags(self):
        return {
            "_xfail_checks": {
                "check_estimators_pickle": (
                    "Current Scipy implementation of _bsplines does not"
                    " support const memory views."
                ),
            }
        }
env-llmeval/lib/python3.10/site-packages/sklearn/preprocessing/_target_encoder.py
ADDED
@@ -0,0 +1,531 @@
from numbers import Integral, Real

import numpy as np

from ..base import OneToOneFeatureMixin, _fit_context
from ..utils._param_validation import Interval, StrOptions
from ..utils.multiclass import type_of_target
from ..utils.validation import (
    _check_feature_names_in,
    _check_y,
    check_consistent_length,
    check_is_fitted,
)
from ._encoders import _BaseEncoder
from ._target_encoder_fast import _fit_encoding_fast, _fit_encoding_fast_auto_smooth


class TargetEncoder(OneToOneFeatureMixin, _BaseEncoder):
    """Target Encoder for regression and classification targets.

    Each category is encoded based on a shrunk estimate of the average target
    values for observations belonging to the category. The encoding scheme mixes
    the global target mean with the target mean conditioned on the value of the
    category (see [MIC]_).

    When the target type is "multiclass", encodings are based
    on the conditional probability estimate for each class. The target is first
    binarized using the "one-vs-all" scheme via
    :class:`~sklearn.preprocessing.LabelBinarizer`, then the average target
    value for each class and each category is used for encoding, resulting in
    `n_features` * `n_classes` encoded output features.

    :class:`TargetEncoder` considers missing values, such as `np.nan` or `None`,
    as another category and encodes them like any other category. Categories
    that are not seen during :meth:`fit` are encoded with the target mean, i.e.
    `target_mean_`.

    For a demo on the importance of the `TargetEncoder` internal cross-fitting,
    see
    :ref:`sphx_glr_auto_examples_preprocessing_plot_target_encoder_cross_val.py`.
    For a comparison of different encoders, refer to
    :ref:`sphx_glr_auto_examples_preprocessing_plot_target_encoder.py`. Read
    more in the :ref:`User Guide <target_encoder>`.

    .. note::
        `fit(X, y).transform(X)` does not equal `fit_transform(X, y)` because a
        :term:`cross fitting` scheme is used in `fit_transform` for encoding.
        See the :ref:`User Guide <target_encoder>` for details.

    .. versionadded:: 1.3

    Parameters
    ----------
    categories : "auto" or list of shape (n_features,) of array-like, default="auto"
        Categories (unique values) per feature:

        - `"auto"` : Determine categories automatically from the training data.
        - list : `categories[i]` holds the categories expected in the i-th column. The
          passed categories should not mix strings and numeric values within a single
          feature, and should be sorted in case of numeric values.

        The used categories are stored in the `categories_` fitted attribute.

    target_type : {"auto", "continuous", "binary", "multiclass"}, default="auto"
        Type of target.

        - `"auto"` : Type of target is inferred with
          :func:`~sklearn.utils.multiclass.type_of_target`.
        - `"continuous"` : Continuous target
        - `"binary"` : Binary target
        - `"multiclass"` : Multiclass target

        .. note::
            The type of target inferred with `"auto"` may not be the desired target
            type used for modeling. For example, if the target consisted of integers
            between 0 and 100, then :func:`~sklearn.utils.multiclass.type_of_target`
            will infer the target as `"multiclass"`. In this case, setting
            `target_type="continuous"` will specify the target as a regression
            problem. The `target_type_` attribute gives the target type used by the
            encoder.

        .. versionchanged:: 1.4
           Added the option 'multiclass'.

    smooth : "auto" or float, default="auto"
        The amount of mixing of the target mean conditioned on the value of the
        category with the global target mean. A larger `smooth` value will put
        more weight on the global target mean.
        If `"auto"`, then `smooth` is set to an empirical Bayes estimate.

    cv : int, default=5
        Determines the number of folds in the :term:`cross fitting` strategy used in
        :meth:`fit_transform`. For classification targets, `StratifiedKFold` is used
        and for continuous targets, `KFold` is used.

    shuffle : bool, default=True
        Whether to shuffle the data in :meth:`fit_transform` before splitting into
        folds. Note that the samples within each split will not be shuffled.

    random_state : int, RandomState instance or None, default=None
        When `shuffle` is True, `random_state` affects the ordering of the
        indices, which controls the randomness of each fold. Otherwise, this
        parameter has no effect.
        Pass an int for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.

    Attributes
    ----------
    encodings_ : list of shape (n_features,) or (n_features * n_classes) of \
        ndarray
        Encodings learnt on all of `X`.
        For feature `i`, `encodings_[i]` are the encodings matching the
        categories listed in `categories_[i]`. When `target_type_` is
        "multiclass", the encoding for feature `i` and class `j` is stored in
        `encodings_[j + (i * len(classes_))]`. E.g., for 2 features (f) and
        3 classes (c), encodings are ordered:
        f0_c0, f0_c1, f0_c2, f1_c0, f1_c1, f1_c2,

    categories_ : list of shape (n_features,) of ndarray
        The categories of each input feature determined during fitting or
        specified in `categories`
        (in order of the features in `X` and corresponding with the output
        of :meth:`transform`).

    target_type_ : str
        Type of target.

    target_mean_ : float
        The overall mean of the target. This value is only used in :meth:`transform`
        to encode categories.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

    classes_ : ndarray or None
        If `target_type_` is 'binary' or 'multiclass', holds the label for each class,
        otherwise `None`.

    See Also
    --------
    OrdinalEncoder : Performs an ordinal (integer) encoding of the categorical features.
        Contrary to TargetEncoder, this encoding is not supervised. Treating the
        resulting encoding as a numerical feature therefore leads to arbitrarily
        ordered values and typically to lower predictive performance when used as
        preprocessing for a classifier or regressor.
    OneHotEncoder : Performs a one-hot encoding of categorical features. This
        unsupervised encoding is better suited for low cardinality categorical
        variables as it generates one new feature per unique category.

    References
    ----------
    .. [MIC] :doi:`Micci-Barreca, Daniele. "A preprocessing scheme for high-cardinality
       categorical attributes in classification and prediction problems"
       SIGKDD Explor. Newsl. 3, 1 (July 2001), 27-32. <10.1145/507533.507538>`

    Examples
    --------
    With `smooth="auto"`, the smoothing parameter is set to an empirical Bayes estimate:

    >>> import numpy as np
    >>> from sklearn.preprocessing import TargetEncoder
    >>> X = np.array([["dog"] * 20 + ["cat"] * 30 + ["snake"] * 38], dtype=object).T
    >>> y = [90.3] * 5 + [80.1] * 15 + [20.4] * 5 + [20.1] * 25 + [21.2] * 8 + [49] * 30
    >>> enc_auto = TargetEncoder(smooth="auto")
    >>> X_trans = enc_auto.fit_transform(X, y)

    >>> # A high `smooth` parameter puts more weight on global mean on the categorical
    >>> # encodings:
    >>> enc_high_smooth = TargetEncoder(smooth=5000.0).fit(X, y)
    >>> enc_high_smooth.target_mean_
    44...
    >>> enc_high_smooth.encodings_
    [array([44..., 44..., 44...])]

    >>> # On the other hand, a low `smooth` parameter puts more weight on target
    >>> # conditioned on the value of the categorical:
    >>> enc_low_smooth = TargetEncoder(smooth=1.0).fit(X, y)
    >>> enc_low_smooth.encodings_
    [array([20..., 80..., 43...])]
    """

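    # A minimal sketch of the shrinkage idea described above (illustrative
    # only; `shrunk_encoding` is an assumed helper name, not one of the
    # library's internals). With a fixed float `smooth`, a category observed
    # `n` times with conditional target mean `cat_mean` is encoded as a
    # convex combination with the global mean:
    #
    # >>> def shrunk_encoding(n, cat_mean, global_mean, smooth):
    # ...     return (n * cat_mean + smooth * global_mean) / (n + smooth)
    # >>> shrunk_encoding(n=20, cat_mean=80.0, global_mean=44.0, smooth=5000.0)
    # 44.14...
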
    _parameter_constraints: dict = {
        "categories": [StrOptions({"auto"}), list],
        "target_type": [StrOptions({"auto", "continuous", "binary", "multiclass"})],
        "smooth": [StrOptions({"auto"}), Interval(Real, 0, None, closed="left")],
        "cv": [Interval(Integral, 2, None, closed="left")],
        "shuffle": ["boolean"],
        "random_state": ["random_state"],
    }

    def __init__(
        self,
        categories="auto",
        target_type="auto",
        smooth="auto",
        cv=5,
        shuffle=True,
        random_state=None,
    ):
        self.categories = categories
        self.smooth = smooth
        self.target_type = target_type
        self.cv = cv
        self.shuffle = shuffle
        self.random_state = random_state

    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, X, y):
        """Fit the :class:`TargetEncoder` to X and y.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            The data to determine the categories of each feature.

        y : array-like of shape (n_samples,)
            The target data used to encode the categories.

        Returns
        -------
        self : object
            Fitted encoder.
        """
        self._fit_encodings_all(X, y)
        return self

@_fit_context(prefer_skip_nested_validation=True)
|
232 |
+
def fit_transform(self, X, y):
|
233 |
+
"""Fit :class:`TargetEncoder` and transform X with the target encoding.
|
234 |
+
|
235 |
+
.. note::
|
236 |
+
`fit(X, y).transform(X)` does not equal `fit_transform(X, y)` because a
|
237 |
+
:term:`cross fitting` scheme is used in `fit_transform` for encoding.
|
238 |
+
See the :ref:`User Guide <target_encoder>`. for details.
|
239 |
+
|
240 |
+
Parameters
|
241 |
+
----------
|
242 |
+
X : array-like of shape (n_samples, n_features)
|
243 |
+
The data to determine the categories of each feature.
|
244 |
+
|
245 |
+
y : array-like of shape (n_samples,)
|
246 |
+
The target data used to encode the categories.
|
247 |
+
|
248 |
+
Returns
|
249 |
+
-------
|
250 |
+
X_trans : ndarray of shape (n_samples, n_features) or \
|
251 |
+
(n_samples, (n_features * n_classes))
|
252 |
+
Transformed input.
|
253 |
+
"""
|
254 |
+
from ..model_selection import KFold, StratifiedKFold # avoid circular import
|
255 |
+
|
256 |
+
X_ordinal, X_known_mask, y_encoded, n_categories = self._fit_encodings_all(X, y)
|
257 |
+
|
258 |
+
# The cv splitter is voluntarily restricted to *KFold to enforce non
|
259 |
+
# overlapping validation folds, otherwise the fit_transform output will
|
260 |
+
# not be well-specified.
|
261 |
+
if self.target_type_ == "continuous":
|
262 |
+
cv = KFold(self.cv, shuffle=self.shuffle, random_state=self.random_state)
|
263 |
+
else:
|
264 |
+
cv = StratifiedKFold(
|
265 |
+
self.cv, shuffle=self.shuffle, random_state=self.random_state
|
266 |
+
)
|
267 |
+
|
268 |
+
# If 'multiclass' multiply axis=1 by num classes else keep shape the same
|
269 |
+
if self.target_type_ == "multiclass":
|
270 |
+
X_out = np.empty(
|
271 |
+
(X_ordinal.shape[0], X_ordinal.shape[1] * len(self.classes_)),
|
272 |
+
dtype=np.float64,
|
273 |
+
)
|
274 |
+
else:
|
275 |
+
X_out = np.empty_like(X_ordinal, dtype=np.float64)
|
276 |
+
|
277 |
+
for train_idx, test_idx in cv.split(X, y):
|
278 |
+
X_train, y_train = X_ordinal[train_idx, :], y_encoded[train_idx]
|
279 |
+
y_train_mean = np.mean(y_train, axis=0)
|
280 |
+
|
281 |
+
if self.target_type_ == "multiclass":
|
282 |
+
encodings = self._fit_encoding_multiclass(
|
283 |
+
X_train,
|
284 |
+
y_train,
|
285 |
+
n_categories,
|
286 |
+
y_train_mean,
|
287 |
+
)
|
288 |
+
else:
|
289 |
+
encodings = self._fit_encoding_binary_or_continuous(
|
290 |
+
X_train,
|
291 |
+
y_train,
|
292 |
+
n_categories,
|
293 |
+
y_train_mean,
|
294 |
+
)
|
295 |
+
self._transform_X_ordinal(
|
296 |
+
X_out,
|
297 |
+
X_ordinal,
|
298 |
+
~X_known_mask,
|
299 |
+
test_idx,
|
300 |
+
encodings,
|
301 |
+
y_train_mean,
|
302 |
+
)
|
303 |
+
return X_out
|
304 |
+
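
    # Note on the loop above: each row of `X_out` receives encodings learned
    # on the *other* folds, so a sample's own target value never leaks into
    # its own encoding. This is why `fit(X, y).transform(X)` and
    # `fit_transform(X, y)` give different results on training data.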

    def transform(self, X):
        """Transform X with the target encoding.

        .. note::
            `fit(X, y).transform(X)` does not equal `fit_transform(X, y)` because a
            :term:`cross fitting` scheme is used in `fit_transform` for encoding.
            See the :ref:`User Guide <target_encoder>` for details.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            The data to determine the categories of each feature.

        Returns
        -------
        X_trans : ndarray of shape (n_samples, n_features) or \
                (n_samples, (n_features * n_classes))
            Transformed input.
        """
        X_ordinal, X_known_mask = self._transform(
            X, handle_unknown="ignore", force_all_finite="allow-nan"
        )

        # If 'multiclass', multiply axis=1 by the number of classes; else keep
        # the shape the same.
        if self.target_type_ == "multiclass":
            X_out = np.empty(
                (X_ordinal.shape[0], X_ordinal.shape[1] * len(self.classes_)),
                dtype=np.float64,
            )
        else:
            X_out = np.empty_like(X_ordinal, dtype=np.float64)

        self._transform_X_ordinal(
            X_out,
            X_ordinal,
            ~X_known_mask,
            slice(None),
            self.encodings_,
            self.target_mean_,
        )
        return X_out

    def _fit_encodings_all(self, X, y):
        """Fit a target encoding with all the data."""
        # avoid circular import
        from ..preprocessing import (
            LabelBinarizer,
            LabelEncoder,
        )

        check_consistent_length(X, y)
        self._fit(X, handle_unknown="ignore", force_all_finite="allow-nan")

        if self.target_type == "auto":
            accepted_target_types = ("binary", "multiclass", "continuous")
            inferred_type_of_target = type_of_target(y, input_name="y")
            if inferred_type_of_target not in accepted_target_types:
                raise ValueError(
                    "Unknown label type: Target type was inferred to be "
                    f"{inferred_type_of_target!r}. Only {accepted_target_types} are "
                    "supported."
                )
            self.target_type_ = inferred_type_of_target
        else:
            self.target_type_ = self.target_type

        self.classes_ = None
        if self.target_type_ == "binary":
            label_encoder = LabelEncoder()
            y = label_encoder.fit_transform(y)
            self.classes_ = label_encoder.classes_
        elif self.target_type_ == "multiclass":
            label_binarizer = LabelBinarizer()
            y = label_binarizer.fit_transform(y)
            self.classes_ = label_binarizer.classes_
        else:  # continuous
            y = _check_y(y, y_numeric=True, estimator=self)

        self.target_mean_ = np.mean(y, axis=0)

        X_ordinal, X_known_mask = self._transform(
            X, handle_unknown="ignore", force_all_finite="allow-nan"
        )
        n_categories = np.fromiter(
            (len(category_for_feature) for category_for_feature in self.categories_),
            dtype=np.int64,
            count=len(self.categories_),
        )
        if self.target_type_ == "multiclass":
            encodings = self._fit_encoding_multiclass(
                X_ordinal,
                y,
                n_categories,
                self.target_mean_,
            )
        else:
            encodings = self._fit_encoding_binary_or_continuous(
                X_ordinal,
                y,
                n_categories,
                self.target_mean_,
            )
        self.encodings_ = encodings

        return X_ordinal, X_known_mask, y, n_categories

    def _fit_encoding_binary_or_continuous(
        self, X_ordinal, y, n_categories, target_mean
    ):
        """Learn target encodings."""
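        # For a numeric `smooth` s, the learned encoding for category i with
        # n_i training rows is a shrinkage of the category mean toward the
        # global target mean:
        #
        #     S_i = (sum of y over category i + s * target_mean) / (n_i + s)
        #
        # With smooth="auto", s is instead derived from an empirical Bayes
        # estimate based on the within-category and global variances of y,
        # which is why `y_variance` is passed to the fast path below.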
        if self.smooth == "auto":
            y_variance = np.var(y)
            encodings = _fit_encoding_fast_auto_smooth(
                X_ordinal,
                y,
                n_categories,
                target_mean,
                y_variance,
            )
        else:
            encodings = _fit_encoding_fast(
                X_ordinal,
                y,
                n_categories,
                self.smooth,
                target_mean,
            )
        return encodings

    def _fit_encoding_multiclass(self, X_ordinal, y, n_categories, target_mean):
        """Learn multiclass encodings.

        Learn encodings for each class (c), then reorder the encodings such
        that the same features (f) are grouped together. `reorder_index`
        enables converting from:
            f0_c0, f1_c0, f0_c1, f1_c1, f0_c2, f1_c2
        to:
            f0_c0, f0_c1, f0_c2, f1_c0, f1_c1, f1_c2
        """
        n_features = self.n_features_in_
        n_classes = len(self.classes_)

        encodings = []
        for i in range(n_classes):
            y_class = y[:, i]
            encoding = self._fit_encoding_binary_or_continuous(
                X_ordinal,
                y_class,
                n_categories,
                target_mean[i],
            )
            encodings.extend(encoding)

        reorder_index = (
            idx
            for start in range(n_features)
            for idx in range(start, (n_classes * n_features), n_features)
        )
        return [encodings[idx] for idx in reorder_index]

    def _transform_X_ordinal(
        self,
        X_out,
        X_ordinal,
        X_unknown_mask,
        row_indices,
        encodings,
        target_mean,
    ):
        """Transform X_ordinal using encodings.

        In the multiclass case, `X_ordinal` and `X_unknown_mask` have
        `n_features` columns (axis=1), while `encodings` has length
        `n_features * n_classes`. `feat_idx` deals with this by repeating
        each feature index `n_classes` times. E.g., for 3 features and 2
        classes: 0, 0, 1, 1, 2, 2.

        Additionally, `target_mean` is of shape (`n_classes`,), so `mean_idx`
        cycles through 0 to `n_classes` - 1, `n_features` times.
        """
        if self.target_type_ == "multiclass":
            n_classes = len(self.classes_)
            for e_idx, encoding in enumerate(encodings):
                # Repeat feature indices by n_classes
                feat_idx = e_idx // n_classes
                # Cycle through each class
                mean_idx = e_idx % n_classes
                X_out[row_indices, e_idx] = encoding[X_ordinal[row_indices, feat_idx]]
                X_out[X_unknown_mask[:, feat_idx], e_idx] = target_mean[mean_idx]
        else:
            for e_idx, encoding in enumerate(encodings):
                X_out[row_indices, e_idx] = encoding[X_ordinal[row_indices, e_idx]]
                X_out[X_unknown_mask[:, e_idx], e_idx] = target_mean

    def get_feature_names_out(self, input_features=None):
        """Get output feature names for transformation.

        Parameters
        ----------
        input_features : array-like of str or None, default=None
            Not used, present here for API consistency by convention.

        Returns
        -------
        feature_names_out : ndarray of str objects
            Transformed feature names. `feature_names_in_` is used unless it is
            not defined, in which case the following input feature names are
            generated: `["x0", "x1", ..., "x(n_features_in_ - 1)"]`.
            When `type_of_target_` is "multiclass", the names are of the format
            '<feature_name>_<class_name>'.
        """
        check_is_fitted(self, "n_features_in_")
        feature_names = _check_feature_names_in(self, input_features)
        if self.target_type_ == "multiclass":
            feature_names = [
                f"{feature_name}_{class_name}"
                for feature_name in feature_names
                for class_name in self.classes_
            ]
            return np.asarray(feature_names, dtype=object)
        else:
            return feature_names

    def _more_tags(self):
        return {
            "requires_y": True,
        }
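
To make the cross-fitting note in `fit_transform` concrete, here is a minimal usage sketch, assuming scikit-learn >= 1.3 (where `TargetEncoder` ships); the toy data and variable names are illustrative, not taken from the file above.

import numpy as np
from sklearn.preprocessing import TargetEncoder

# Toy data: two categories, a continuous target.
X = np.array([["a"] * 10 + ["b"] * 10], dtype=object).T
y = np.arange(20, dtype=np.float64)

enc = TargetEncoder(smooth="auto", random_state=0)
X_cross_fitted = enc.fit_transform(X, y)  # out-of-fold encodings per row
X_full_fit = enc.transform(X)             # encodings fitted on all rows

# fit_transform encodes each row with folds that exclude it, so the two
# outputs should generally differ on training data.
print(np.allclose(X_cross_fitted, X_full_fit))  # expected: False
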
env-llmeval/lib/python3.10/site-packages/sklearn/preprocessing/_target_encoder_fast.cpython-310-x86_64-linux-gnu.so
ADDED
Binary file (570 kB).

env-llmeval/lib/python3.10/site-packages/sklearn/preprocessing/tests/__init__.py
ADDED
File without changes

env-llmeval/lib/python3.10/site-packages/sklearn/preprocessing/tests/test_data.py
ADDED
@@ -0,0 +1,2593 @@
# Authors:
#
#          Giorgio Patrini
#
# License: BSD 3 clause

import re
import warnings

import numpy as np
import numpy.linalg as la
import pytest
from scipy import sparse, stats

from sklearn import datasets
from sklearn.base import clone
from sklearn.exceptions import NotFittedError
from sklearn.metrics.pairwise import linear_kernel
from sklearn.model_selection import cross_val_predict
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import (
    Binarizer,
    KernelCenterer,
    MaxAbsScaler,
    MinMaxScaler,
    Normalizer,
    PowerTransformer,
    QuantileTransformer,
    RobustScaler,
    StandardScaler,
    add_dummy_feature,
    maxabs_scale,
    minmax_scale,
    normalize,
    power_transform,
    quantile_transform,
    robust_scale,
    scale,
)
from sklearn.preprocessing._data import BOUNDS_THRESHOLD, _handle_zeros_in_scale
from sklearn.svm import SVR
from sklearn.utils import gen_batches, shuffle
from sklearn.utils._array_api import (
    yield_namespace_device_dtype_combinations,
)
from sklearn.utils._testing import (
    _convert_container,
    assert_allclose,
    assert_allclose_dense_sparse,
    assert_almost_equal,
    assert_array_almost_equal,
    assert_array_equal,
    assert_array_less,
    skip_if_32bit,
)
from sklearn.utils.estimator_checks import (
    _get_check_estimator_ids,
    check_array_api_input_and_values,
)
from sklearn.utils.fixes import (
    COO_CONTAINERS,
    CSC_CONTAINERS,
    CSR_CONTAINERS,
    LIL_CONTAINERS,
)
from sklearn.utils.sparsefuncs import mean_variance_axis

iris = datasets.load_iris()

# Make some data to be used many times
rng = np.random.RandomState(0)
n_features = 30
n_samples = 1000
offsets = rng.uniform(-1, 1, size=n_features)
scales = rng.uniform(1, 10, size=n_features)
X_2d = rng.randn(n_samples, n_features) * scales + offsets
X_1row = X_2d[0, :].reshape(1, n_features)
X_1col = X_2d[:, 0].reshape(n_samples, 1)
X_list_1row = X_1row.tolist()
X_list_1col = X_1col.tolist()


def toarray(a):
    if hasattr(a, "toarray"):
        a = a.toarray()
    return a


def _check_dim_1axis(a):
    return np.asarray(a).shape[0]


def assert_correct_incr(i, batch_start, batch_stop, n, chunk_size, n_samples_seen):
    if batch_stop != n:
        assert (i + 1) * chunk_size == n_samples_seen
    else:
        assert i * chunk_size + (batch_stop - batch_start) == n_samples_seen


def test_raises_value_error_if_sample_weights_greater_than_1d():
    # Sample weights must be either scalar or 1D

    n_sampless = [2, 3]
    n_featuress = [3, 2]

    for n_samples, n_features in zip(n_sampless, n_featuress):
        X = rng.randn(n_samples, n_features)
        y = rng.randn(n_samples)

        scaler = StandardScaler()

        # make sure an error is raised if the sample weights are more than 1-d
        sample_weight_notOK = rng.randn(n_samples, 1) ** 2
        with pytest.raises(ValueError):
            scaler.fit(X, y, sample_weight=sample_weight_notOK)


@pytest.mark.parametrize(
    ["Xw", "X", "sample_weight"],
    [
        ([[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [1, 2, 3], [4, 5, 6]], [2.0, 1.0]),
        (
            [[1, 0, 1], [0, 0, 1]],
            [[1, 0, 1], [0, 0, 1], [0, 0, 1], [0, 0, 1]],
            np.array([1, 3]),
        ),
        (
            [[1, np.nan, 1], [np.nan, np.nan, 1]],
            [
                [1, np.nan, 1],
                [np.nan, np.nan, 1],
                [np.nan, np.nan, 1],
                [np.nan, np.nan, 1],
            ],
            np.array([1, 3]),
        ),
    ],
)
@pytest.mark.parametrize("array_constructor", ["array", "sparse_csr", "sparse_csc"])
def test_standard_scaler_sample_weight(Xw, X, sample_weight, array_constructor):
    with_mean = not array_constructor.startswith("sparse")
    X = _convert_container(X, array_constructor)
    Xw = _convert_container(Xw, array_constructor)

    # weighted StandardScaler
    yw = np.ones(Xw.shape[0])
    scaler_w = StandardScaler(with_mean=with_mean)
    scaler_w.fit(Xw, yw, sample_weight=sample_weight)

    # unweighted, but with repeated samples
    y = np.ones(X.shape[0])
    scaler = StandardScaler(with_mean=with_mean)
    scaler.fit(X, y)

    X_test = [[1.5, 2.5, 3.5], [3.5, 4.5, 5.5]]

    assert_almost_equal(scaler.mean_, scaler_w.mean_)
    assert_almost_equal(scaler.var_, scaler_w.var_)
    assert_almost_equal(scaler.transform(X_test), scaler_w.transform(X_test))

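
# The sample_weight test above relies on integer frequency weights being
# equivalent to sample repetition: the weighted mean
#     mean_w = sum(w_i * x_i) / sum(w_i)
# equals the plain mean of a dataset in which row i is repeated w_i times,
# and the same identity holds for the weighted (biased) variance.
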
def test_standard_scaler_1d():
    # Test scaling of dataset along single axis
    for X in [X_1row, X_1col, X_list_1row, X_list_1col]:
        scaler = StandardScaler()
        X_scaled = scaler.fit(X).transform(X, copy=True)

        if isinstance(X, list):
            X = np.array(X)  # cast only after scaling done

        if _check_dim_1axis(X) == 1:
            assert_almost_equal(scaler.mean_, X.ravel())
            assert_almost_equal(scaler.scale_, np.ones(n_features))
            assert_array_almost_equal(X_scaled.mean(axis=0), np.zeros_like(n_features))
            assert_array_almost_equal(X_scaled.std(axis=0), np.zeros_like(n_features))
        else:
            assert_almost_equal(scaler.mean_, X.mean())
            assert_almost_equal(scaler.scale_, X.std())
            assert_array_almost_equal(X_scaled.mean(axis=0), np.zeros_like(n_features))
            assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
            assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
        assert scaler.n_samples_seen_ == X.shape[0]

        # check inverse transform
        X_scaled_back = scaler.inverse_transform(X_scaled)
        assert_array_almost_equal(X_scaled_back, X)

    # Constant feature
    X = np.ones((5, 1))
    scaler = StandardScaler()
    X_scaled = scaler.fit(X).transform(X, copy=True)
    assert_almost_equal(scaler.mean_, 1.0)
    assert_almost_equal(scaler.scale_, 1.0)
    assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
    assert_array_almost_equal(X_scaled.std(axis=0), 0.0)
    assert scaler.n_samples_seen_ == X.shape[0]


@pytest.mark.parametrize("sparse_container", [None] + CSC_CONTAINERS + CSR_CONTAINERS)
@pytest.mark.parametrize("add_sample_weight", [False, True])
def test_standard_scaler_dtype(add_sample_weight, sparse_container):
    # Ensure scaling does not affect dtype
    rng = np.random.RandomState(0)
    n_samples = 10
    n_features = 3
    if add_sample_weight:
        sample_weight = np.ones(n_samples)
    else:
        sample_weight = None
    with_mean = True
    if sparse_container is not None:
        # scipy sparse containers do not support float16, see
        # https://github.com/scipy/scipy/issues/7408 for more details.
        supported_dtype = [np.float64, np.float32]
    else:
        supported_dtype = [np.float64, np.float32, np.float16]
    for dtype in supported_dtype:
        X = rng.randn(n_samples, n_features).astype(dtype)
        if sparse_container is not None:
            X = sparse_container(X)
            with_mean = False

        scaler = StandardScaler(with_mean=with_mean)
        X_scaled = scaler.fit(X, sample_weight=sample_weight).transform(X)
        assert X.dtype == X_scaled.dtype
        assert scaler.mean_.dtype == np.float64
        assert scaler.scale_.dtype == np.float64


@pytest.mark.parametrize(
    "scaler",
    [
        StandardScaler(with_mean=False),
        RobustScaler(with_centering=False),
    ],
)
@pytest.mark.parametrize("sparse_container", [None] + CSC_CONTAINERS + CSR_CONTAINERS)
@pytest.mark.parametrize("add_sample_weight", [False, True])
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
@pytest.mark.parametrize("constant", [0, 1.0, 100.0])
def test_standard_scaler_constant_features(
    scaler, add_sample_weight, sparse_container, dtype, constant
):
    if isinstance(scaler, RobustScaler) and add_sample_weight:
        pytest.skip(f"{scaler.__class__.__name__} does not yet support sample_weight")

    rng = np.random.RandomState(0)
    n_samples = 100
    n_features = 1
    if add_sample_weight:
        fit_params = dict(sample_weight=rng.uniform(size=n_samples) * 2)
    else:
        fit_params = {}
    X_array = np.full(shape=(n_samples, n_features), fill_value=constant, dtype=dtype)
    X = X_array if sparse_container is None else sparse_container(X_array)
    X_scaled = scaler.fit(X, **fit_params).transform(X)

    if isinstance(scaler, StandardScaler):
        # The variance info should be close to zero for constant features.
        assert_allclose(scaler.var_, np.zeros(X.shape[1]), atol=1e-7)

    # Constant features should not be scaled (scale of 1.):
    assert_allclose(scaler.scale_, np.ones(X.shape[1]))

    assert X_scaled is not X  # make sure we make a copy
    assert_allclose_dense_sparse(X_scaled, X)

    if isinstance(scaler, StandardScaler) and not add_sample_weight:
        # Also check consistency with the standard scale function.
        X_scaled_2 = scale(X, with_mean=scaler.with_mean)
        assert X_scaled_2 is not X  # make sure we did a copy
        assert_allclose_dense_sparse(X_scaled_2, X)


@pytest.mark.parametrize("n_samples", [10, 100, 10_000])
@pytest.mark.parametrize("average", [1e-10, 1, 1e10])
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
@pytest.mark.parametrize("sparse_container", [None] + CSC_CONTAINERS + CSR_CONTAINERS)
def test_standard_scaler_near_constant_features(
    n_samples, sparse_container, average, dtype
):
    # Check that when the variance is too small (var << mean**2) the feature
    # is considered constant and not scaled.

    scale_min, scale_max = -30, 19
    scales = np.array([10**i for i in range(scale_min, scale_max + 1)], dtype=dtype)

    n_features = scales.shape[0]
    X = np.empty((n_samples, n_features), dtype=dtype)
    # Make a dataset of known var = scales**2 and mean = average
    X[: n_samples // 2, :] = average + scales
    X[n_samples // 2 :, :] = average - scales
    X_array = X if sparse_container is None else sparse_container(X)

    scaler = StandardScaler(with_mean=False).fit(X_array)

    # StandardScaler uses float64 accumulators even if the data has a float32
    # dtype.
    eps = np.finfo(np.float64).eps

    # If var < bound = n * eps * var + n**2 * eps**2 * mean**2, the feature is
    # considered constant and the scale_ attribute is set to 1.
    bounds = n_samples * eps * scales**2 + n_samples**2 * eps**2 * average**2
    within_bounds = scales**2 <= bounds

    # Check that scale_min is small enough to have some scales below the
    # bound and therefore detected as constant:
    assert np.any(within_bounds)

    # Check that such features are actually treated as constant by the scaler:
    assert all(scaler.var_[within_bounds] <= bounds[within_bounds])
    assert_allclose(scaler.scale_[within_bounds], 1.0)

    # Depending on the dtype of X, some features might not actually be
    # representable as non-constant for small scales (even if above the
    # precision bound of the float64 variance estimate). Such features should
    # be correctly detected as constants with 0 variance by StandardScaler.
    representable_diff = X[0, :] - X[-1, :] != 0
    assert_allclose(scaler.var_[np.logical_not(representable_diff)], 0)
    assert_allclose(scaler.scale_[np.logical_not(representable_diff)], 1)

    # The other features are scaled and scale_ is equal to sqrt(var_) assuming
    # that scales are large enough for average + scale and average - scale to
    # be distinct in X (depending on X's dtype).
    common_mask = np.logical_and(scales**2 > bounds, representable_diff)
    assert_allclose(scaler.scale_[common_mask], np.sqrt(scaler.var_)[common_mask])


def test_scale_1d():
    # 1-d inputs
    X_list = [1.0, 3.0, 5.0, 0.0]
    X_arr = np.array(X_list)

    for X in [X_list, X_arr]:
        X_scaled = scale(X)
        assert_array_almost_equal(X_scaled.mean(), 0.0)
        assert_array_almost_equal(X_scaled.std(), 1.0)
        assert_array_equal(scale(X, with_mean=False, with_std=False), X)


@skip_if_32bit
def test_standard_scaler_numerical_stability():
    # Test numerical stability of scaling
    # np.log(1e-5) is taken because its floating point representation
    # was empirically found to cause numerical problems with np.mean & np.std.
    x = np.full(8, np.log(1e-5), dtype=np.float64)
    # This does not raise a warning as the number of samples is too low
    # to trigger the problem in recent numpy
    with warnings.catch_warnings():
        warnings.simplefilter("error", UserWarning)
        scale(x)
    assert_array_almost_equal(scale(x), np.zeros(8))

    # with 2 more samples, the std computation runs into numerical issues:
    x = np.full(10, np.log(1e-5), dtype=np.float64)
    warning_message = "standard deviation of the data is probably very close to 0"
    with pytest.warns(UserWarning, match=warning_message):
        x_scaled = scale(x)
    assert_array_almost_equal(x_scaled, np.zeros(10))

    x = np.full(10, 1e-100, dtype=np.float64)
    with warnings.catch_warnings():
        warnings.simplefilter("error", UserWarning)
        x_small_scaled = scale(x)
    assert_array_almost_equal(x_small_scaled, np.zeros(10))

    # Large values can cause (often recoverable) numerical stability issues:
    x_big = np.full(10, 1e100, dtype=np.float64)
    warning_message = "Dataset may contain too large values"
    with pytest.warns(UserWarning, match=warning_message):
        x_big_scaled = scale(x_big)
    assert_array_almost_equal(x_big_scaled, np.zeros(10))
    assert_array_almost_equal(x_big_scaled, x_small_scaled)
    with pytest.warns(UserWarning, match=warning_message):
        x_big_centered = scale(x_big, with_std=False)
    assert_array_almost_equal(x_big_centered, np.zeros(10))
    assert_array_almost_equal(x_big_centered, x_small_scaled)


def test_scaler_2d_arrays():
    # Test scaling of 2d array along first axis
    rng = np.random.RandomState(0)
    n_features = 5
    n_samples = 4
    X = rng.randn(n_samples, n_features)
    X[:, 0] = 0.0  # first feature is always zero

    scaler = StandardScaler()
    X_scaled = scaler.fit(X).transform(X, copy=True)
    assert not np.any(np.isnan(X_scaled))
    assert scaler.n_samples_seen_ == n_samples

    assert_array_almost_equal(X_scaled.mean(axis=0), n_features * [0.0])
    assert_array_almost_equal(X_scaled.std(axis=0), [0.0, 1.0, 1.0, 1.0, 1.0])
    # Check that X has been copied
    assert X_scaled is not X

    # check inverse transform
    X_scaled_back = scaler.inverse_transform(X_scaled)
    assert X_scaled_back is not X
    assert X_scaled_back is not X_scaled
    assert_array_almost_equal(X_scaled_back, X)

    X_scaled = scale(X, axis=1, with_std=False)
    assert not np.any(np.isnan(X_scaled))
    assert_array_almost_equal(X_scaled.mean(axis=1), n_samples * [0.0])
    X_scaled = scale(X, axis=1, with_std=True)
    assert not np.any(np.isnan(X_scaled))
    assert_array_almost_equal(X_scaled.mean(axis=1), n_samples * [0.0])
    assert_array_almost_equal(X_scaled.std(axis=1), n_samples * [1.0])
    # Check that the data hasn't been modified
    assert X_scaled is not X

    X_scaled = scaler.fit(X).transform(X, copy=False)
    assert not np.any(np.isnan(X_scaled))
    assert_array_almost_equal(X_scaled.mean(axis=0), n_features * [0.0])
    assert_array_almost_equal(X_scaled.std(axis=0), [0.0, 1.0, 1.0, 1.0, 1.0])
    # Check that X has not been copied
    assert X_scaled is X

    X = rng.randn(4, 5)
    X[:, 0] = 1.0  # first feature is a constant, non-zero feature
    scaler = StandardScaler()
    X_scaled = scaler.fit(X).transform(X, copy=True)
    assert not np.any(np.isnan(X_scaled))
    assert_array_almost_equal(X_scaled.mean(axis=0), n_features * [0.0])
    assert_array_almost_equal(X_scaled.std(axis=0), [0.0, 1.0, 1.0, 1.0, 1.0])
    # Check that X has not been copied
    assert X_scaled is not X


def test_scaler_float16_overflow():
    # Test that the scaler does not overflow on float16 numpy arrays
    rng = np.random.RandomState(0)
    # float16 has a maximum of 65500.0. Summing 200000 values of magnitude
    # 5-10 reaches roughly 1e6-2e6, which is more than enough to overflow
    # the data type during accumulation.
    X = rng.uniform(5, 10, [200000, 1]).astype(np.float16)

    with np.errstate(over="raise"):
        scaler = StandardScaler().fit(X)
        X_scaled = scaler.transform(X)

    # Calculate the float64 equivalent to verify result
    X_scaled_f64 = StandardScaler().fit_transform(X.astype(np.float64))

    # Overflow calculations may cause -inf, inf, or nan. Since there is no nan
    # input, all of the outputs should be finite. This may be redundant since a
    # FloatingPointError exception will be thrown on overflow above.
    assert np.all(np.isfinite(X_scaled))

    # The normal distribution is very unlikely to go above 4. At 4.0-8.0 the
    # float16 precision is 2^-8 which is around 0.004. Thus only 2 decimals are
    # checked to account for precision differences.
    assert_array_almost_equal(X_scaled, X_scaled_f64, decimal=2)

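
# A quick way to reproduce the failure mode the test above guards against
# (illustrative only, not part of the test suite):
#
#     >>> np.float16(50000) + np.float16(50000)
#     inf
#
# float16 accumulation overflows past ~65504, which is why the scaler keeps
# its running statistics in float64.
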
def test_handle_zeros_in_scale():
    s1 = np.array([0, 1e-16, 1, 2, 3])
    s2 = _handle_zeros_in_scale(s1, copy=True)

    assert_allclose(s1, np.array([0, 1e-16, 1, 2, 3]))
    assert_allclose(s2, np.array([1, 1, 1, 2, 3]))

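
# `_handle_zeros_in_scale` replaces (near-)zero scale entries with 1.0 so that
# constant features pass through the division unchanged instead of producing
# inf/nan. The test above checks both the substitution and that the input
# array is left untouched when copy=True.
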
def test_minmax_scaler_partial_fit():
|
466 |
+
# Test if partial_fit run over many batches of size 1 and 50
|
467 |
+
# gives the same results as fit
|
468 |
+
X = X_2d
|
469 |
+
n = X.shape[0]
|
470 |
+
|
471 |
+
for chunk_size in [1, 2, 50, n, n + 42]:
|
472 |
+
# Test mean at the end of the process
|
473 |
+
scaler_batch = MinMaxScaler().fit(X)
|
474 |
+
|
475 |
+
scaler_incr = MinMaxScaler()
|
476 |
+
for batch in gen_batches(n_samples, chunk_size):
|
477 |
+
scaler_incr = scaler_incr.partial_fit(X[batch])
|
478 |
+
|
479 |
+
assert_array_almost_equal(scaler_batch.data_min_, scaler_incr.data_min_)
|
480 |
+
assert_array_almost_equal(scaler_batch.data_max_, scaler_incr.data_max_)
|
481 |
+
assert scaler_batch.n_samples_seen_ == scaler_incr.n_samples_seen_
|
482 |
+
assert_array_almost_equal(scaler_batch.data_range_, scaler_incr.data_range_)
|
483 |
+
assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_)
|
484 |
+
assert_array_almost_equal(scaler_batch.min_, scaler_incr.min_)
|
485 |
+
|
486 |
+
# Test std after 1 step
|
487 |
+
batch0 = slice(0, chunk_size)
|
488 |
+
scaler_batch = MinMaxScaler().fit(X[batch0])
|
489 |
+
scaler_incr = MinMaxScaler().partial_fit(X[batch0])
|
490 |
+
|
491 |
+
assert_array_almost_equal(scaler_batch.data_min_, scaler_incr.data_min_)
|
492 |
+
assert_array_almost_equal(scaler_batch.data_max_, scaler_incr.data_max_)
|
493 |
+
assert scaler_batch.n_samples_seen_ == scaler_incr.n_samples_seen_
|
494 |
+
assert_array_almost_equal(scaler_batch.data_range_, scaler_incr.data_range_)
|
495 |
+
assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_)
|
496 |
+
assert_array_almost_equal(scaler_batch.min_, scaler_incr.min_)
|
497 |
+
|
498 |
+
# Test std until the end of partial fits, and
|
499 |
+
scaler_batch = MinMaxScaler().fit(X)
|
500 |
+
scaler_incr = MinMaxScaler() # Clean estimator
|
501 |
+
for i, batch in enumerate(gen_batches(n_samples, chunk_size)):
|
502 |
+
scaler_incr = scaler_incr.partial_fit(X[batch])
|
503 |
+
assert_correct_incr(
|
504 |
+
i,
|
505 |
+
batch_start=batch.start,
|
506 |
+
batch_stop=batch.stop,
|
507 |
+
n=n,
|
508 |
+
chunk_size=chunk_size,
|
509 |
+
n_samples_seen=scaler_incr.n_samples_seen_,
|
510 |
+
)
|
511 |
+
|
512 |
+
|
513 |
+
def test_standard_scaler_partial_fit():
|
514 |
+
# Test if partial_fit run over many batches of size 1 and 50
|
515 |
+
# gives the same results as fit
|
516 |
+
X = X_2d
|
517 |
+
n = X.shape[0]
|
518 |
+
|
519 |
+
for chunk_size in [1, 2, 50, n, n + 42]:
|
520 |
+
# Test mean at the end of the process
|
521 |
+
scaler_batch = StandardScaler(with_std=False).fit(X)
|
522 |
+
|
523 |
+
scaler_incr = StandardScaler(with_std=False)
|
524 |
+
for batch in gen_batches(n_samples, chunk_size):
|
525 |
+
scaler_incr = scaler_incr.partial_fit(X[batch])
|
526 |
+
assert_array_almost_equal(scaler_batch.mean_, scaler_incr.mean_)
|
527 |
+
assert scaler_batch.var_ == scaler_incr.var_ # Nones
|
528 |
+
assert scaler_batch.n_samples_seen_ == scaler_incr.n_samples_seen_
|
529 |
+
|
530 |
+
# Test std after 1 step
|
531 |
+
batch0 = slice(0, chunk_size)
|
532 |
+
scaler_incr = StandardScaler().partial_fit(X[batch0])
|
533 |
+
if chunk_size == 1:
|
534 |
+
assert_array_almost_equal(
|
535 |
+
np.zeros(n_features, dtype=np.float64), scaler_incr.var_
|
536 |
+
)
|
537 |
+
assert_array_almost_equal(
|
538 |
+
np.ones(n_features, dtype=np.float64), scaler_incr.scale_
|
539 |
+
)
|
540 |
+
else:
|
541 |
+
assert_array_almost_equal(np.var(X[batch0], axis=0), scaler_incr.var_)
|
542 |
+
assert_array_almost_equal(
|
543 |
+
np.std(X[batch0], axis=0), scaler_incr.scale_
|
544 |
+
) # no constants
|
545 |
+
|
546 |
+
# Test std until the end of partial fits, and
|
547 |
+
scaler_batch = StandardScaler().fit(X)
|
548 |
+
scaler_incr = StandardScaler() # Clean estimator
|
549 |
+
for i, batch in enumerate(gen_batches(n_samples, chunk_size)):
|
550 |
+
scaler_incr = scaler_incr.partial_fit(X[batch])
|
551 |
+
assert_correct_incr(
|
552 |
+
i,
|
553 |
+
batch_start=batch.start,
|
554 |
+
batch_stop=batch.stop,
|
555 |
+
n=n,
|
556 |
+
chunk_size=chunk_size,
|
557 |
+
n_samples_seen=scaler_incr.n_samples_seen_,
|
558 |
+
)
|
559 |
+
|
560 |
+
assert_array_almost_equal(scaler_batch.var_, scaler_incr.var_)
|
561 |
+
assert scaler_batch.n_samples_seen_ == scaler_incr.n_samples_seen_
|
562 |
+
|
563 |
+
|
564 |
+
@pytest.mark.parametrize("sparse_container", CSC_CONTAINERS + CSR_CONTAINERS)
|
565 |
+
def test_standard_scaler_partial_fit_numerical_stability(sparse_container):
|
566 |
+
# Test if the incremental computation introduces significative errors
|
567 |
+
# for large datasets with values of large magniture
|
568 |
+
rng = np.random.RandomState(0)
|
569 |
+
n_features = 2
|
570 |
+
n_samples = 100
|
571 |
+
offsets = rng.uniform(-1e15, 1e15, size=n_features)
|
572 |
+
scales = rng.uniform(1e3, 1e6, size=n_features)
|
573 |
+
X = rng.randn(n_samples, n_features) * scales + offsets
|
574 |
+
|
575 |
+
scaler_batch = StandardScaler().fit(X)
|
576 |
+
scaler_incr = StandardScaler()
|
577 |
+
for chunk in X:
|
578 |
+
scaler_incr = scaler_incr.partial_fit(chunk.reshape(1, n_features))
|
579 |
+
|
580 |
+
# Regardless of abs values, they must not be more diff 6 significant digits
|
581 |
+
tol = 10 ** (-6)
|
582 |
+
assert_allclose(scaler_incr.mean_, scaler_batch.mean_, rtol=tol)
|
583 |
+
assert_allclose(scaler_incr.var_, scaler_batch.var_, rtol=tol)
|
584 |
+
assert_allclose(scaler_incr.scale_, scaler_batch.scale_, rtol=tol)
|
585 |
+
# NOTE Be aware that for much larger offsets std is very unstable (last
|
586 |
+
# assert) while mean is OK.
|
587 |
+
|
588 |
+
# Sparse input
|
589 |
+
size = (100, 3)
|
590 |
+
scale = 1e20
|
591 |
+
X = sparse_container(rng.randint(0, 2, size).astype(np.float64) * scale)
|
592 |
+
|
593 |
+
# with_mean=False is required with sparse input
|
594 |
+
scaler = StandardScaler(with_mean=False).fit(X)
|
595 |
+
scaler_incr = StandardScaler(with_mean=False)
|
596 |
+
|
597 |
+
for chunk in X:
|
598 |
+
scaler_incr = scaler_incr.partial_fit(chunk)
|
599 |
+
|
600 |
+
# Regardless of magnitude, they must not differ more than of 6 digits
|
601 |
+
tol = 10 ** (-6)
|
602 |
+
assert scaler.mean_ is not None
|
603 |
+
assert_allclose(scaler_incr.var_, scaler.var_, rtol=tol)
|
604 |
+
assert_allclose(scaler_incr.scale_, scaler.scale_, rtol=tol)
|
605 |
+
|
606 |
+
|
607 |
+
@pytest.mark.parametrize("sample_weight", [True, None])
|
608 |
+
@pytest.mark.parametrize("sparse_container", CSC_CONTAINERS + CSR_CONTAINERS)
|
609 |
+
def test_partial_fit_sparse_input(sample_weight, sparse_container):
|
610 |
+
# Check that sparsity is not destroyed
|
611 |
+
X = sparse_container(np.array([[1.0], [0.0], [0.0], [5.0]]))
|
612 |
+
|
613 |
+
if sample_weight:
|
614 |
+
sample_weight = rng.rand(X.shape[0])
|
615 |
+
|
616 |
+
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
|
617 |
+
X_null = null_transform.partial_fit(X, sample_weight=sample_weight).transform(X)
|
618 |
+
assert_array_equal(X_null.toarray(), X.toarray())
|
619 |
+
X_orig = null_transform.inverse_transform(X_null)
|
620 |
+
assert_array_equal(X_orig.toarray(), X_null.toarray())
|
621 |
+
assert_array_equal(X_orig.toarray(), X.toarray())
|
622 |
+
|
623 |
+
|
624 |
+
@pytest.mark.parametrize("sample_weight", [True, None])
|
625 |
+
def test_standard_scaler_trasform_with_partial_fit(sample_weight):
|
626 |
+
# Check some postconditions after applying partial_fit and transform
|
627 |
+
X = X_2d[:100, :]
|
628 |
+
|
629 |
+
if sample_weight:
|
630 |
+
sample_weight = rng.rand(X.shape[0])
|
631 |
+
|
632 |
+
scaler_incr = StandardScaler()
|
633 |
+
for i, batch in enumerate(gen_batches(X.shape[0], 1)):
|
634 |
+
X_sofar = X[: (i + 1), :]
|
635 |
+
chunks_copy = X_sofar.copy()
|
636 |
+
if sample_weight is None:
|
637 |
+
scaled_batch = StandardScaler().fit_transform(X_sofar)
|
638 |
+
scaler_incr = scaler_incr.partial_fit(X[batch])
|
639 |
+
else:
|
640 |
+
scaled_batch = StandardScaler().fit_transform(
|
641 |
+
X_sofar, sample_weight=sample_weight[: i + 1]
|
642 |
+
)
|
643 |
+
scaler_incr = scaler_incr.partial_fit(
|
644 |
+
X[batch], sample_weight=sample_weight[batch]
|
645 |
+
)
|
646 |
+
scaled_incr = scaler_incr.transform(X_sofar)
|
647 |
+
|
648 |
+
assert_array_almost_equal(scaled_batch, scaled_incr)
|
649 |
+
assert_array_almost_equal(X_sofar, chunks_copy) # No change
|
650 |
+
right_input = scaler_incr.inverse_transform(scaled_incr)
|
651 |
+
assert_array_almost_equal(X_sofar, right_input)
|
652 |
+
|
653 |
+
zero = np.zeros(X.shape[1])
|
654 |
+
epsilon = np.finfo(float).eps
|
655 |
+
assert_array_less(zero, scaler_incr.var_ + epsilon) # as less or equal
|
656 |
+
assert_array_less(zero, scaler_incr.scale_ + epsilon)
|
657 |
+
if sample_weight is None:
|
658 |
+
# (i+1) because the Scaler has been already fitted
|
659 |
+
assert (i + 1) == scaler_incr.n_samples_seen_
|
660 |
+
else:
|
661 |
+
assert np.sum(sample_weight[: i + 1]) == pytest.approx(
|
662 |
+
scaler_incr.n_samples_seen_
|
663 |
+
)
|
664 |
+
|
665 |
+
|
666 |
+
def test_standard_check_array_of_inverse_transform():
|
667 |
+
# Check if StandardScaler inverse_transform is
|
668 |
+
# converting the integer array to float
|
669 |
+
x = np.array(
|
670 |
+
[
|
671 |
+
[1, 1, 1, 0, 1, 0],
|
672 |
+
[1, 1, 1, 0, 1, 0],
|
673 |
+
[0, 8, 0, 1, 0, 0],
|
674 |
+
[1, 4, 1, 1, 0, 0],
|
675 |
+
[0, 1, 0, 0, 1, 0],
|
676 |
+
[0, 4, 0, 1, 0, 1],
|
677 |
+
],
|
678 |
+
dtype=np.int32,
|
679 |
+
)
|
680 |
+
|
681 |
+
scaler = StandardScaler()
|
682 |
+
scaler.fit(x)
|
683 |
+
|
684 |
+
# The of inverse_transform should be converted
|
685 |
+
# to a float array.
|
686 |
+
# If not X *= self.scale_ will fail.
|
687 |
+
scaler.inverse_transform(x)
|
688 |
+
|
689 |
+
|
690 |
+
@pytest.mark.parametrize(
|
691 |
+
"array_namespace, device, dtype_name", yield_namespace_device_dtype_combinations()
|
692 |
+
)
|
693 |
+
@pytest.mark.parametrize(
|
694 |
+
"check",
|
695 |
+
[check_array_api_input_and_values],
|
696 |
+
ids=_get_check_estimator_ids,
|
697 |
+
)
|
698 |
+
@pytest.mark.parametrize(
|
699 |
+
"estimator",
|
700 |
+
[
|
701 |
+
MaxAbsScaler(),
|
702 |
+
MinMaxScaler(),
|
703 |
+
KernelCenterer(),
|
704 |
+
Normalizer(norm="l1"),
|
705 |
+
Normalizer(norm="l2"),
|
706 |
+
Normalizer(norm="max"),
|
707 |
+
],
|
708 |
+
ids=_get_check_estimator_ids,
|
709 |
+
)
|
710 |
+
def test_scaler_array_api_compliance(
|
711 |
+
estimator, check, array_namespace, device, dtype_name
|
712 |
+
):
|
713 |
+
name = estimator.__class__.__name__
|
714 |
+
check(name, estimator, array_namespace, device=device, dtype_name=dtype_name)
|
715 |
+
|
716 |
+
|
717 |
+
def test_min_max_scaler_iris():
|
718 |
+
X = iris.data
|
719 |
+
scaler = MinMaxScaler()
|
720 |
+
# default params
|
721 |
+
X_trans = scaler.fit_transform(X)
|
722 |
+
assert_array_almost_equal(X_trans.min(axis=0), 0)
|
723 |
+
assert_array_almost_equal(X_trans.max(axis=0), 1)
|
724 |
+
X_trans_inv = scaler.inverse_transform(X_trans)
|
725 |
+
assert_array_almost_equal(X, X_trans_inv)
|
726 |
+
|
727 |
+
# not default params: min=1, max=2
|
728 |
+
scaler = MinMaxScaler(feature_range=(1, 2))
|
729 |
+
X_trans = scaler.fit_transform(X)
|
730 |
+
assert_array_almost_equal(X_trans.min(axis=0), 1)
|
731 |
+
assert_array_almost_equal(X_trans.max(axis=0), 2)
|
732 |
+
X_trans_inv = scaler.inverse_transform(X_trans)
|
733 |
+
assert_array_almost_equal(X, X_trans_inv)
|
734 |
+
|
735 |
+
# min=-.5, max=.6
|
736 |
+
scaler = MinMaxScaler(feature_range=(-0.5, 0.6))
|
737 |
+
X_trans = scaler.fit_transform(X)
|
738 |
+
assert_array_almost_equal(X_trans.min(axis=0), -0.5)
|
739 |
+
assert_array_almost_equal(X_trans.max(axis=0), 0.6)
|
740 |
+
X_trans_inv = scaler.inverse_transform(X_trans)
|
741 |
+
assert_array_almost_equal(X, X_trans_inv)
|
742 |
+
|
743 |
+
# raises on invalid range
|
744 |
+
scaler = MinMaxScaler(feature_range=(2, 1))
|
745 |
+
with pytest.raises(ValueError):
|
746 |
+
scaler.fit(X)
|
747 |
+
|
748 |
+
|
749 |
+
def test_min_max_scaler_zero_variance_features():
|
750 |
+
# Check min max scaler on toy data with zero variance features
|
751 |
+
X = [[0.0, 1.0, +0.5], [0.0, 1.0, -0.1], [0.0, 1.0, +1.1]]
|
752 |
+
|
753 |
+
X_new = [[+0.0, 2.0, 0.5], [-1.0, 1.0, 0.0], [+0.0, 1.0, 1.5]]
|
754 |
+
|
755 |
+
# default params
|
756 |
+
scaler = MinMaxScaler()
|
757 |
+
X_trans = scaler.fit_transform(X)
|
758 |
+
X_expected_0_1 = [[0.0, 0.0, 0.5], [0.0, 0.0, 0.0], [0.0, 0.0, 1.0]]
|
759 |
+
assert_array_almost_equal(X_trans, X_expected_0_1)
|
760 |
+
X_trans_inv = scaler.inverse_transform(X_trans)
|
761 |
+
assert_array_almost_equal(X, X_trans_inv)
|
762 |
+
|
763 |
+
X_trans_new = scaler.transform(X_new)
|
764 |
+
X_expected_0_1_new = [[+0.0, 1.0, 0.500], [-1.0, 0.0, 0.083], [+0.0, 0.0, 1.333]]
|
765 |
+
assert_array_almost_equal(X_trans_new, X_expected_0_1_new, decimal=2)
|
766 |
+
|
767 |
+
# not default params
|
768 |
+
scaler = MinMaxScaler(feature_range=(1, 2))
|
769 |
+
X_trans = scaler.fit_transform(X)
|
770 |
+
X_expected_1_2 = [[1.0, 1.0, 1.5], [1.0, 1.0, 1.0], [1.0, 1.0, 2.0]]
|
771 |
+
assert_array_almost_equal(X_trans, X_expected_1_2)
|
772 |
+
|
773 |
+
# function interface
|
774 |
+
X_trans = minmax_scale(X)
|
775 |
+
assert_array_almost_equal(X_trans, X_expected_0_1)
|
776 |
+
X_trans = minmax_scale(X, feature_range=(1, 2))
|
777 |
+
assert_array_almost_equal(X_trans, X_expected_1_2)
|
778 |
+
|
779 |
+
|
780 |
+
def test_minmax_scale_axis1():
|
781 |
+
X = iris.data
|
782 |
+
X_trans = minmax_scale(X, axis=1)
|
783 |
+
assert_array_almost_equal(np.min(X_trans, axis=1), 0)
|
784 |
+
assert_array_almost_equal(np.max(X_trans, axis=1), 1)
|
785 |
+
|
786 |
+
|
787 |
+
def test_min_max_scaler_1d():
|
788 |
+
# Test scaling of dataset along single axis
|
789 |
+
for X in [X_1row, X_1col, X_list_1row, X_list_1row]:
|
790 |
+
scaler = MinMaxScaler(copy=True)
|
791 |
+
X_scaled = scaler.fit(X).transform(X)
|
792 |
+
|
793 |
+
if isinstance(X, list):
|
794 |
+
X = np.array(X) # cast only after scaling done
|
795 |
+
|
796 |
+
if _check_dim_1axis(X) == 1:
|
797 |
+
assert_array_almost_equal(X_scaled.min(axis=0), np.zeros(n_features))
|
798 |
+
assert_array_almost_equal(X_scaled.max(axis=0), np.zeros(n_features))
|
799 |
+
else:
|
800 |
+
assert_array_almost_equal(X_scaled.min(axis=0), 0.0)
|
801 |
+
assert_array_almost_equal(X_scaled.max(axis=0), 1.0)
|
802 |
+
assert scaler.n_samples_seen_ == X.shape[0]
|
803 |
+
|
804 |
+
# check inverse transform
|
805 |
+
X_scaled_back = scaler.inverse_transform(X_scaled)
|
806 |
+
assert_array_almost_equal(X_scaled_back, X)
|
807 |
+
|
808 |
+
# Constant feature
|
809 |
+
X = np.ones((5, 1))
|
810 |
+
scaler = MinMaxScaler()
|
811 |
+
X_scaled = scaler.fit(X).transform(X)
|
812 |
+
assert X_scaled.min() >= 0.0
|
813 |
+
assert X_scaled.max() <= 1.0
|
814 |
+
assert scaler.n_samples_seen_ == X.shape[0]
|
815 |
+
|
816 |
+
# Function interface
|
817 |
+
X_1d = X_1row.ravel()
|
818 |
+
min_ = X_1d.min()
|
819 |
+
max_ = X_1d.max()
|
820 |
+
assert_array_almost_equal(
|
821 |
+
(X_1d - min_) / (max_ - min_), minmax_scale(X_1d, copy=True)
|
822 |
+
)
|
823 |
+
|
824 |
+
|
825 |
+
@pytest.mark.parametrize("sample_weight", [True, None])
|
826 |
+
@pytest.mark.parametrize("sparse_container", CSC_CONTAINERS + CSR_CONTAINERS)
|
827 |
+
def test_scaler_without_centering(sample_weight, sparse_container):
|
828 |
+
rng = np.random.RandomState(42)
|
829 |
+
X = rng.randn(4, 5)
|
830 |
+
X[:, 0] = 0.0 # first feature is always of zero
|
831 |
+
X_sparse = sparse_container(X)
|
832 |
+
|
833 |
+
if sample_weight:
|
834 |
+
sample_weight = rng.rand(X.shape[0])
|
835 |
+
|
836 |
+
with pytest.raises(ValueError):
|
837 |
+
StandardScaler().fit(X_sparse)
|
838 |
+
|
839 |
+
scaler = StandardScaler(with_mean=False).fit(X, sample_weight=sample_weight)
|
840 |
+
X_scaled = scaler.transform(X, copy=True)
|
841 |
+
assert not np.any(np.isnan(X_scaled))
|
842 |
+
|
843 |
+
scaler_sparse = StandardScaler(with_mean=False).fit(
|
844 |
+
X_sparse, sample_weight=sample_weight
|
845 |
+
)
|
846 |
+
X_sparse_scaled = scaler_sparse.transform(X_sparse, copy=True)
|
847 |
+
assert not np.any(np.isnan(X_sparse_scaled.data))
|
848 |
+
|
849 |
+
assert_array_almost_equal(scaler.mean_, scaler_sparse.mean_)
|
850 |
+
assert_array_almost_equal(scaler.var_, scaler_sparse.var_)
|
851 |
+
assert_array_almost_equal(scaler.scale_, scaler_sparse.scale_)
|
852 |
+
assert_array_almost_equal(scaler.n_samples_seen_, scaler_sparse.n_samples_seen_)
|
853 |
+
|
854 |
+
if sample_weight is None:
|
855 |
+
assert_array_almost_equal(
|
856 |
+
X_scaled.mean(axis=0), [0.0, -0.01, 2.24, -0.35, -0.78], 2
|
857 |
+
)
|
858 |
+
assert_array_almost_equal(X_scaled.std(axis=0), [0.0, 1.0, 1.0, 1.0, 1.0])
|
859 |
+
|
860 |
+
X_sparse_scaled_mean, X_sparse_scaled_var = mean_variance_axis(X_sparse_scaled, 0)
|
861 |
+
assert_array_almost_equal(X_sparse_scaled_mean, X_scaled.mean(axis=0))
|
862 |
+
assert_array_almost_equal(X_sparse_scaled_var, X_scaled.var(axis=0))
|
863 |
+
|
864 |
+
# Check that X has not been modified (copy)
|
865 |
+
assert X_scaled is not X
|
866 |
+
assert X_sparse_scaled is not X_sparse
|
867 |
+
|
868 |
+
X_scaled_back = scaler.inverse_transform(X_scaled)
|
869 |
+
assert X_scaled_back is not X
|
870 |
+
assert X_scaled_back is not X_scaled
|
871 |
+
assert_array_almost_equal(X_scaled_back, X)
|
872 |
+
|
873 |
+
X_sparse_scaled_back = scaler_sparse.inverse_transform(X_sparse_scaled)
|
874 |
+
assert X_sparse_scaled_back is not X_sparse
|
875 |
+
assert X_sparse_scaled_back is not X_sparse_scaled
|
876 |
+
assert_array_almost_equal(X_sparse_scaled_back.toarray(), X)
|
877 |
+
|
878 |
+
if sparse_container in CSR_CONTAINERS:
|
879 |
+
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
|
880 |
+
X_null = null_transform.fit_transform(X_sparse)
|
881 |
+
assert_array_equal(X_null.data, X_sparse.data)
|
882 |
+
X_orig = null_transform.inverse_transform(X_null)
|
883 |
+
assert_array_equal(X_orig.data, X_sparse.data)
|
884 |
+
|
885 |
+
|
886 |
+
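
# Note on the test above: StandardScaler refuses to center sparse input
# because subtracting a per-feature mean would turn every implicit zero into
# an explicit non-zero and densify the matrix; with_mean=False scales by the
# standard deviation only, which preserves sparsity (zeros stay zeros).
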
@pytest.mark.parametrize("with_mean", [True, False])
|
887 |
+
@pytest.mark.parametrize("with_std", [True, False])
|
888 |
+
@pytest.mark.parametrize("sparse_container", [None] + CSC_CONTAINERS + CSR_CONTAINERS)
|
889 |
+
def test_scaler_n_samples_seen_with_nan(with_mean, with_std, sparse_container):
|
890 |
+
X = np.array(
|
891 |
+
[[0, 1, 3], [np.nan, 6, 10], [5, 4, np.nan], [8, 0, np.nan]], dtype=np.float64
|
892 |
+
)
|
893 |
+
if sparse_container is not None:
|
894 |
+
X = sparse_container(X)
|
895 |
+
|
896 |
+
if sparse.issparse(X) and with_mean:
|
897 |
+
pytest.skip("'with_mean=True' cannot be used with sparse matrix.")
|
898 |
+
|
899 |
+
transformer = StandardScaler(with_mean=with_mean, with_std=with_std)
|
900 |
+
transformer.fit(X)
|
901 |
+
|
902 |
+
assert_array_equal(transformer.n_samples_seen_, np.array([3, 4, 2]))
|
903 |
+
|
904 |
+
|
905 |
+
def _check_identity_scalers_attributes(scaler_1, scaler_2):
|
906 |
+
assert scaler_1.mean_ is scaler_2.mean_ is None
|
907 |
+
assert scaler_1.var_ is scaler_2.var_ is None
|
908 |
+
assert scaler_1.scale_ is scaler_2.scale_ is None
|
909 |
+
assert scaler_1.n_samples_seen_ == scaler_2.n_samples_seen_
|
910 |
+
|
911 |
+
|
912 |
+
@pytest.mark.parametrize("sparse_container", CSC_CONTAINERS + CSR_CONTAINERS)
|
913 |
+
def test_scaler_return_identity(sparse_container):
|
914 |
+
# test that the scaler return identity when with_mean and with_std are
|
915 |
+
# False
|
916 |
+
X_dense = np.array([[0, 1, 3], [5, 6, 0], [8, 0, 10]], dtype=np.float64)
|
917 |
+
X_sparse = sparse_container(X_dense)
|
918 |
+
|
919 |
+
transformer_dense = StandardScaler(with_mean=False, with_std=False)
|
920 |
+
X_trans_dense = transformer_dense.fit_transform(X_dense)
|
921 |
+
assert_allclose(X_trans_dense, X_dense)
|
922 |
+
|
923 |
+
transformer_sparse = clone(transformer_dense)
|
924 |
+
X_trans_sparse = transformer_sparse.fit_transform(X_sparse)
|
925 |
+
assert_allclose_dense_sparse(X_trans_sparse, X_sparse)
|
926 |
+
|
927 |
+
_check_identity_scalers_attributes(transformer_dense, transformer_sparse)
|
928 |
+
|
929 |
+
transformer_dense.partial_fit(X_dense)
|
930 |
+
transformer_sparse.partial_fit(X_sparse)
|
931 |
+
_check_identity_scalers_attributes(transformer_dense, transformer_sparse)
|
932 |
+
|
933 |
+
transformer_dense.fit(X_dense)
|
934 |
+
transformer_sparse.fit(X_sparse)
|
935 |
+
_check_identity_scalers_attributes(transformer_dense, transformer_sparse)
|
936 |
+
|
937 |
+
|
938 |
+
@pytest.mark.parametrize("sparse_container", CSC_CONTAINERS + CSR_CONTAINERS)
|
939 |
+
def test_scaler_int(sparse_container):
|
940 |
+
# test that scaler converts integer input to floating
|
941 |
+
# for both sparse and dense matrices
|
942 |
+
rng = np.random.RandomState(42)
|
943 |
+
X = rng.randint(20, size=(4, 5))
|
944 |
+
X[:, 0] = 0 # first feature is always of zero
|
945 |
+
X_sparse = sparse_container(X)
|
946 |
+
|
947 |
+
with warnings.catch_warnings(record=True):
|
948 |
+
scaler = StandardScaler(with_mean=False).fit(X)
|
949 |
+
X_scaled = scaler.transform(X, copy=True)
|
950 |
+
assert not np.any(np.isnan(X_scaled))
|
951 |
+
|
952 |
+
with warnings.catch_warnings(record=True):
|
953 |
+
scaler_sparse = StandardScaler(with_mean=False).fit(X_sparse)
|
954 |
+
X_sparse_scaled = scaler_sparse.transform(X_sparse, copy=True)
|
955 |
+
assert not np.any(np.isnan(X_sparse_scaled.data))
|
956 |
+
|
957 |
+
assert_array_almost_equal(scaler.mean_, scaler_sparse.mean_)
|
958 |
+
assert_array_almost_equal(scaler.var_, scaler_sparse.var_)
|
959 |
+
assert_array_almost_equal(scaler.scale_, scaler_sparse.scale_)
|
960 |
+
|
961 |
+
assert_array_almost_equal(
|
962 |
+
X_scaled.mean(axis=0), [0.0, 1.109, 1.856, 21.0, 1.559], 2
|
963 |
+
)
|
964 |
+
assert_array_almost_equal(X_scaled.std(axis=0), [0.0, 1.0, 1.0, 1.0, 1.0])
|
965 |
+
|
966 |
+
X_sparse_scaled_mean, X_sparse_scaled_std = mean_variance_axis(
|
967 |
+
X_sparse_scaled.astype(float), 0
|
968 |
+
)
|
969 |
+
assert_array_almost_equal(X_sparse_scaled_mean, X_scaled.mean(axis=0))
|
970 |
+
assert_array_almost_equal(X_sparse_scaled_std, X_scaled.std(axis=0))
|
971 |
+
|
972 |
+
# Check that X has not been modified (copy)
|
973 |
+
assert X_scaled is not X
|
974 |
+
assert X_sparse_scaled is not X_sparse
|
975 |
+
|
976 |
+
X_scaled_back = scaler.inverse_transform(X_scaled)
|
977 |
+
assert X_scaled_back is not X
|
978 |
+
assert X_scaled_back is not X_scaled
|
979 |
+
assert_array_almost_equal(X_scaled_back, X)
|
980 |
+
|
981 |
+
X_sparse_scaled_back = scaler_sparse.inverse_transform(X_sparse_scaled)
|
982 |
+
assert X_sparse_scaled_back is not X_sparse
|
983 |
+
assert X_sparse_scaled_back is not X_sparse_scaled
|
984 |
+
assert_array_almost_equal(X_sparse_scaled_back.toarray(), X)
|
985 |
+
|
986 |
+
if sparse_container in CSR_CONTAINERS:
|
987 |
+
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
|
988 |
+
with warnings.catch_warnings(record=True):
|
989 |
+
X_null = null_transform.fit_transform(X_sparse)
|
990 |
+
assert_array_equal(X_null.data, X_sparse.data)
|
991 |
+
X_orig = null_transform.inverse_transform(X_null)
|
992 |
+
assert_array_equal(X_orig.data, X_sparse.data)
|
993 |
+
|
994 |
+
|
995 |
+
@pytest.mark.parametrize("sparse_container", CSR_CONTAINERS + CSC_CONTAINERS)
|
996 |
+
def test_scaler_without_copy(sparse_container):
|
997 |
+
# Check that StandardScaler.fit does not change input
|
998 |
+
rng = np.random.RandomState(42)
|
999 |
+
X = rng.randn(4, 5)
|
1000 |
+
X[:, 0] = 0.0 # first feature is always of zero
|
1001 |
+
X_sparse = sparse_container(X)
|
1002 |
+
|
1003 |
+
X_copy = X.copy()
|
1004 |
+
StandardScaler(copy=False).fit(X)
|
1005 |
+
assert_array_equal(X, X_copy)
|
1006 |
+
|
1007 |
+
X_sparse_copy = X_sparse.copy()
|
1008 |
+
StandardScaler(with_mean=False, copy=False).fit(X_sparse)
|
1009 |
+
assert_array_equal(X_sparse.toarray(), X_sparse_copy.toarray())
|
1010 |
+
|
1011 |
+
|
1012 |
+
@pytest.mark.parametrize("sparse_container", CSR_CONTAINERS + CSC_CONTAINERS)
|
1013 |
+
def test_scale_sparse_with_mean_raise_exception(sparse_container):
|
1014 |
+
rng = np.random.RandomState(42)
|
1015 |
+
X = rng.randn(4, 5)
|
1016 |
+
X_sparse = sparse_container(X)
|
1017 |
+
|
1018 |
+
# check scaling and fit with direct calls on sparse data
|
1019 |
+
with pytest.raises(ValueError):
|
1020 |
+
scale(X_sparse, with_mean=True)
|
1021 |
+
with pytest.raises(ValueError):
|
1022 |
+
StandardScaler(with_mean=True).fit(X_sparse)
|
1023 |
+
|
1024 |
+
# check transform and inverse_transform after a fit on a dense array
|
1025 |
+
scaler = StandardScaler(with_mean=True).fit(X)
|
1026 |
+
with pytest.raises(ValueError):
|
1027 |
+
scaler.transform(X_sparse)
|
1028 |
+
|
1029 |
+
X_transformed_sparse = sparse_container(scaler.transform(X))
|
1030 |
+
with pytest.raises(ValueError):
|
1031 |
+
scaler.inverse_transform(X_transformed_sparse)
|
1032 |
+
|
1033 |
+
|
1034 |
+
def test_scale_input_finiteness_validation():
|
1035 |
+
# Check if non finite inputs raise ValueError
|
1036 |
+
X = [[np.inf, 5, 6, 7, 8]]
|
1037 |
+
with pytest.raises(
|
1038 |
+
ValueError, match="Input contains infinity or a value too large"
|
1039 |
+
):
|
1040 |
+
scale(X)
|
1041 |
+
|
1042 |
+
|
1043 |
+
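
# The RobustScaler tests below all reduce to median/IQR scaling. A minimal
# sketch of the transform, for illustration only; `_sketch_robust_scale` is a
# hypothetical helper that assumes a non-degenerate quantile range:
def _sketch_robust_scale(X, quantile_range=(25.0, 75.0)):
    X = np.asarray(X, dtype=float)
    center = np.median(X, axis=0)  # what RobustScaler stores as center_
    q_lo, q_hi = np.percentile(X, quantile_range, axis=0)
    return (X - center) / (q_hi - q_lo)  # scale_ is the quantile range
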
def test_robust_scaler_error_sparse():
    X_sparse = sparse.rand(1000, 10)
    scaler = RobustScaler(with_centering=True)
    err_msg = "Cannot center sparse matrices"
    with pytest.raises(ValueError, match=err_msg):
        scaler.fit(X_sparse)


@pytest.mark.parametrize("with_centering", [True, False])
@pytest.mark.parametrize("with_scaling", [True, False])
@pytest.mark.parametrize("X", [np.random.randn(10, 3), sparse.rand(10, 3, density=0.5)])
def test_robust_scaler_attributes(X, with_centering, with_scaling):
    # check consistent type of attributes
    if with_centering and sparse.issparse(X):
        pytest.skip("RobustScaler cannot center sparse matrix")

    scaler = RobustScaler(with_centering=with_centering, with_scaling=with_scaling)
    scaler.fit(X)

    if with_centering:
        assert isinstance(scaler.center_, np.ndarray)
    else:
        assert scaler.center_ is None
    if with_scaling:
        assert isinstance(scaler.scale_, np.ndarray)
    else:
        assert scaler.scale_ is None


@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_robust_scaler_col_zero_sparse(csr_container):
    # check that the scaler is working when there is no data materialized in a
    # column of a sparse matrix
    X = np.random.randn(10, 5)
    X[:, 0] = 0
    X = csr_container(X)

    scaler = RobustScaler(with_centering=False)
    scaler.fit(X)
    assert scaler.scale_[0] == pytest.approx(1)

    X_trans = scaler.transform(X)
    assert_allclose(X[:, [0]].toarray(), X_trans[:, [0]].toarray())


def test_robust_scaler_2d_arrays():
    # Test robust scaling of 2d array along first axis
    rng = np.random.RandomState(0)
    X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero

    scaler = RobustScaler()
    X_scaled = scaler.fit(X).transform(X)

    assert_array_almost_equal(np.median(X_scaled, axis=0), 5 * [0.0])
    assert_array_almost_equal(X_scaled.std(axis=0)[0], 0)


@pytest.mark.parametrize("density", [0, 0.05, 0.1, 0.5, 1])
@pytest.mark.parametrize("strictly_signed", ["positive", "negative", "zeros", None])
def test_robust_scaler_equivalence_dense_sparse(density, strictly_signed):
    # Check the equivalence of the fitting with dense and sparse matrices
    X_sparse = sparse.rand(1000, 5, density=density).tocsc()
    if strictly_signed == "positive":
        X_sparse.data = np.abs(X_sparse.data)
    elif strictly_signed == "negative":
        X_sparse.data = -np.abs(X_sparse.data)
    elif strictly_signed == "zeros":
        X_sparse.data = np.zeros(X_sparse.data.shape, dtype=np.float64)
    X_dense = X_sparse.toarray()

    scaler_sparse = RobustScaler(with_centering=False)
    scaler_dense = RobustScaler(with_centering=False)

    scaler_sparse.fit(X_sparse)
    scaler_dense.fit(X_dense)

    assert_allclose(scaler_sparse.scale_, scaler_dense.scale_)


@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_robust_scaler_transform_one_row_csr(csr_container):
    # Check RobustScaler on transforming csr matrix with one row
    rng = np.random.RandomState(0)
    X = rng.randn(4, 5)
    single_row = np.array([[0.1, 1.0, 2.0, 0.0, -1.0]])
    scaler = RobustScaler(with_centering=False)
    scaler = scaler.fit(X)
    row_trans = scaler.transform(csr_container(single_row))
    row_expected = single_row / scaler.scale_
    assert_array_almost_equal(row_trans.toarray(), row_expected)
    row_scaled_back = scaler.inverse_transform(row_trans)
    assert_array_almost_equal(single_row, row_scaled_back.toarray())


def test_robust_scaler_iris():
    X = iris.data
    scaler = RobustScaler()
    X_trans = scaler.fit_transform(X)
    assert_array_almost_equal(np.median(X_trans, axis=0), 0)
    X_trans_inv = scaler.inverse_transform(X_trans)
    assert_array_almost_equal(X, X_trans_inv)
    q = np.percentile(X_trans, q=(25, 75), axis=0)
    iqr = q[1] - q[0]
    assert_array_almost_equal(iqr, 1)


def test_robust_scaler_iris_quantiles():
    X = iris.data
    scaler = RobustScaler(quantile_range=(10, 90))
    X_trans = scaler.fit_transform(X)
    assert_array_almost_equal(np.median(X_trans, axis=0), 0)
    X_trans_inv = scaler.inverse_transform(X_trans)
    assert_array_almost_equal(X, X_trans_inv)
    q = np.percentile(X_trans, q=(10, 90), axis=0)
    q_range = q[1] - q[0]
    assert_array_almost_equal(q_range, 1)
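
# The QuantileTransformer tests below check a rank-based mapping: each value
# is sent through the empirical CDF of its feature onto [0, 1]. A minimal
# 1-D sketch of the uniform-output case, for illustration only;
# `_sketch_quantile_uniform` is a hypothetical helper:
def _sketch_quantile_uniform(x_train, x_new, n_quantiles=5):
    references = np.linspace(0, 1, n_quantiles)
    quantiles = np.quantile(x_train, references)
    # interpolate between learned quantiles; out-of-range values clip to 0/1
    return np.interp(x_new, quantiles, references)
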
@pytest.mark.parametrize("csc_container", CSC_CONTAINERS)
|
1163 |
+
def test_quantile_transform_iris(csc_container):
|
1164 |
+
X = iris.data
|
1165 |
+
# uniform output distribution
|
1166 |
+
transformer = QuantileTransformer(n_quantiles=30)
|
1167 |
+
X_trans = transformer.fit_transform(X)
|
1168 |
+
X_trans_inv = transformer.inverse_transform(X_trans)
|
1169 |
+
assert_array_almost_equal(X, X_trans_inv)
|
1170 |
+
# normal output distribution
|
1171 |
+
transformer = QuantileTransformer(n_quantiles=30, output_distribution="normal")
|
1172 |
+
X_trans = transformer.fit_transform(X)
|
1173 |
+
X_trans_inv = transformer.inverse_transform(X_trans)
|
1174 |
+
assert_array_almost_equal(X, X_trans_inv)
|
1175 |
+
# make sure it is possible to take the inverse of a sparse matrix
|
1176 |
+
# which contain negative value; this is the case in the iris dataset
|
1177 |
+
X_sparse = csc_container(X)
|
1178 |
+
X_sparse_tran = transformer.fit_transform(X_sparse)
|
1179 |
+
X_sparse_tran_inv = transformer.inverse_transform(X_sparse_tran)
|
1180 |
+
assert_array_almost_equal(X_sparse.toarray(), X_sparse_tran_inv.toarray())
|
1181 |
+
|
1182 |
+
|
1183 |
+
@pytest.mark.parametrize("csc_container", CSC_CONTAINERS)
|
1184 |
+
def test_quantile_transform_check_error(csc_container):
|
1185 |
+
X = np.transpose(
|
1186 |
+
[
|
1187 |
+
[0, 25, 50, 0, 0, 0, 75, 0, 0, 100],
|
1188 |
+
[2, 4, 0, 0, 6, 8, 0, 10, 0, 0],
|
1189 |
+
[0, 0, 2.6, 4.1, 0, 0, 2.3, 0, 9.5, 0.1],
|
1190 |
+
]
|
1191 |
+
)
|
1192 |
+
X = csc_container(X)
|
1193 |
+
X_neg = np.transpose(
|
1194 |
+
[
|
1195 |
+
[0, 25, 50, 0, 0, 0, 75, 0, 0, 100],
|
1196 |
+
[-2, 4, 0, 0, 6, 8, 0, 10, 0, 0],
|
1197 |
+
[0, 0, 2.6, 4.1, 0, 0, 2.3, 0, 9.5, 0.1],
|
1198 |
+
]
|
1199 |
+
)
|
1200 |
+
X_neg = csc_container(X_neg)
|
1201 |
+
|
1202 |
+
err_msg = (
|
1203 |
+
"The number of quantiles cannot be greater than "
|
1204 |
+
"the number of samples used. Got 1000 quantiles "
|
1205 |
+
"and 10 samples."
|
1206 |
+
)
|
1207 |
+
with pytest.raises(ValueError, match=err_msg):
|
1208 |
+
QuantileTransformer(subsample=10).fit(X)
|
1209 |
+
|
1210 |
+
transformer = QuantileTransformer(n_quantiles=10)
|
1211 |
+
err_msg = "QuantileTransformer only accepts non-negative sparse matrices."
|
1212 |
+
with pytest.raises(ValueError, match=err_msg):
|
1213 |
+
transformer.fit(X_neg)
|
1214 |
+
transformer.fit(X)
|
1215 |
+
err_msg = "QuantileTransformer only accepts non-negative sparse matrices."
|
1216 |
+
with pytest.raises(ValueError, match=err_msg):
|
1217 |
+
transformer.transform(X_neg)
|
1218 |
+
|
1219 |
+
X_bad_feat = np.transpose(
|
1220 |
+
[[0, 25, 50, 0, 0, 0, 75, 0, 0, 100], [0, 0, 2.6, 4.1, 0, 0, 2.3, 0, 9.5, 0.1]]
|
1221 |
+
)
|
1222 |
+
err_msg = (
|
1223 |
+
"X has 2 features, but QuantileTransformer is expecting 3 features as input."
|
1224 |
+
)
|
1225 |
+
with pytest.raises(ValueError, match=err_msg):
|
1226 |
+
transformer.inverse_transform(X_bad_feat)
|
1227 |
+
|
1228 |
+
transformer = QuantileTransformer(n_quantiles=10).fit(X)
|
1229 |
+
# check that an error is raised if input is scalar
|
1230 |
+
with pytest.raises(ValueError, match="Expected 2D array, got scalar array instead"):
|
1231 |
+
transformer.transform(10)
|
1232 |
+
# check that a warning is raised is n_quantiles > n_samples
|
1233 |
+
transformer = QuantileTransformer(n_quantiles=100)
|
1234 |
+
warn_msg = "n_quantiles is set to n_samples"
|
1235 |
+
with pytest.warns(UserWarning, match=warn_msg) as record:
|
1236 |
+
transformer.fit(X)
|
1237 |
+
assert len(record) == 1
|
1238 |
+
assert transformer.n_quantiles_ == X.shape[0]
|
1239 |
+
|
1240 |
+
|
1241 |
+
@pytest.mark.parametrize("csc_container", CSC_CONTAINERS)
|
1242 |
+
def test_quantile_transform_sparse_ignore_zeros(csc_container):
|
1243 |
+
X = np.array([[0, 1], [0, 0], [0, 2], [0, 2], [0, 1]])
|
1244 |
+
X_sparse = csc_container(X)
|
1245 |
+
transformer = QuantileTransformer(ignore_implicit_zeros=True, n_quantiles=5)
|
1246 |
+
|
1247 |
+
# dense case -> warning raise
|
1248 |
+
warning_message = (
|
1249 |
+
"'ignore_implicit_zeros' takes effect"
|
1250 |
+
" only with sparse matrix. This parameter has no"
|
1251 |
+
" effect."
|
1252 |
+
)
|
1253 |
+
with pytest.warns(UserWarning, match=warning_message):
|
1254 |
+
transformer.fit(X)
|
1255 |
+
|
1256 |
+
X_expected = np.array([[0, 0], [0, 0], [0, 1], [0, 1], [0, 0]])
|
1257 |
+
X_trans = transformer.fit_transform(X_sparse)
|
1258 |
+
assert_almost_equal(X_expected, X_trans.toarray())
|
1259 |
+
|
1260 |
+
# consider the case where sparse entries are missing values and user-given
|
1261 |
+
# zeros are to be considered
|
1262 |
+
X_data = np.array([0, 0, 1, 0, 2, 2, 1, 0, 1, 2, 0])
|
1263 |
+
X_col = np.array([0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1])
|
1264 |
+
X_row = np.array([0, 4, 0, 1, 2, 3, 4, 5, 6, 7, 8])
|
1265 |
+
X_sparse = csc_container((X_data, (X_row, X_col)))
|
1266 |
+
X_trans = transformer.fit_transform(X_sparse)
|
1267 |
+
X_expected = np.array(
|
1268 |
+
[
|
1269 |
+
[0.0, 0.5],
|
1270 |
+
[0.0, 0.0],
|
1271 |
+
[0.0, 1.0],
|
1272 |
+
[0.0, 1.0],
|
1273 |
+
[0.0, 0.5],
|
1274 |
+
[0.0, 0.0],
|
1275 |
+
[0.0, 0.5],
|
1276 |
+
[0.0, 1.0],
|
1277 |
+
[0.0, 0.0],
|
1278 |
+
]
|
1279 |
+
)
|
1280 |
+
assert_almost_equal(X_expected, X_trans.toarray())
|
1281 |
+
|
1282 |
+
transformer = QuantileTransformer(ignore_implicit_zeros=True, n_quantiles=5)
|
1283 |
+
X_data = np.array([-1, -1, 1, 0, 0, 0, 1, -1, 1])
|
1284 |
+
X_col = np.array([0, 0, 1, 1, 1, 1, 1, 1, 1])
|
1285 |
+
X_row = np.array([0, 4, 0, 1, 2, 3, 4, 5, 6])
|
1286 |
+
X_sparse = csc_container((X_data, (X_row, X_col)))
|
1287 |
+
X_trans = transformer.fit_transform(X_sparse)
|
1288 |
+
X_expected = np.array(
|
1289 |
+
[[0, 1], [0, 0.375], [0, 0.375], [0, 0.375], [0, 1], [0, 0], [0, 1]]
|
1290 |
+
)
|
1291 |
+
assert_almost_equal(X_expected, X_trans.toarray())
|
1292 |
+
assert_almost_equal(
|
1293 |
+
X_sparse.toarray(), transformer.inverse_transform(X_trans).toarray()
|
1294 |
+
)
|
1295 |
+
|
1296 |
+
# check in conjunction with subsampling
|
1297 |
+
transformer = QuantileTransformer(
|
1298 |
+
ignore_implicit_zeros=True, n_quantiles=5, subsample=8, random_state=0
|
1299 |
+
)
|
1300 |
+
X_trans = transformer.fit_transform(X_sparse)
|
1301 |
+
assert_almost_equal(X_expected, X_trans.toarray())
|
1302 |
+
assert_almost_equal(
|
1303 |
+
X_sparse.toarray(), transformer.inverse_transform(X_trans).toarray()
|
1304 |
+
)
|
1305 |
+
|
1306 |
+
|
1307 |
+
def test_quantile_transform_dense_toy():
|
1308 |
+
X = np.array(
|
1309 |
+
[[0, 2, 2.6], [25, 4, 4.1], [50, 6, 2.3], [75, 8, 9.5], [100, 10, 0.1]]
|
1310 |
+
)
|
1311 |
+
|
1312 |
+
transformer = QuantileTransformer(n_quantiles=5)
|
1313 |
+
transformer.fit(X)
|
1314 |
+
|
1315 |
+
# using a uniform output, each entry of X should be map between 0 and 1
|
1316 |
+
# and equally spaced
|
1317 |
+
X_trans = transformer.fit_transform(X)
|
1318 |
+
X_expected = np.tile(np.linspace(0, 1, num=5), (3, 1)).T
|
1319 |
+
assert_almost_equal(np.sort(X_trans, axis=0), X_expected)
|
1320 |
+
|
1321 |
+
X_test = np.array(
|
1322 |
+
[
|
1323 |
+
[-1, 1, 0],
|
1324 |
+
[101, 11, 10],
|
1325 |
+
]
|
1326 |
+
)
|
1327 |
+
X_expected = np.array(
|
1328 |
+
[
|
1329 |
+
[0, 0, 0],
|
1330 |
+
[1, 1, 1],
|
1331 |
+
]
|
1332 |
+
)
|
1333 |
+
assert_array_almost_equal(transformer.transform(X_test), X_expected)
|
1334 |
+
|
1335 |
+
X_trans_inv = transformer.inverse_transform(X_trans)
|
1336 |
+
assert_array_almost_equal(X, X_trans_inv)
|
1337 |
+
|
1338 |
+
|
1339 |
+
def test_quantile_transform_subsampling():
|
1340 |
+
# Test that subsampling the input yield to a consistent results We check
|
1341 |
+
# that the computed quantiles are almost mapped to a [0, 1] vector where
|
1342 |
+
# values are equally spaced. The infinite norm is checked to be smaller
|
1343 |
+
# than a given threshold. This is repeated 5 times.
|
1344 |
+
|
1345 |
+
# dense support
|
1346 |
+
n_samples = 1000000
|
1347 |
+
n_quantiles = 1000
|
1348 |
+
X = np.sort(np.random.sample((n_samples, 1)), axis=0)
|
1349 |
+
ROUND = 5
|
1350 |
+
inf_norm_arr = []
|
1351 |
+
for random_state in range(ROUND):
|
1352 |
+
transformer = QuantileTransformer(
|
1353 |
+
random_state=random_state,
|
1354 |
+
n_quantiles=n_quantiles,
|
1355 |
+
subsample=n_samples // 10,
|
1356 |
+
)
|
1357 |
+
transformer.fit(X)
|
1358 |
+
diff = np.linspace(0, 1, n_quantiles) - np.ravel(transformer.quantiles_)
|
1359 |
+
inf_norm = np.max(np.abs(diff))
|
1360 |
+
assert inf_norm < 1e-2
|
1361 |
+
inf_norm_arr.append(inf_norm)
|
1362 |
+
# each random subsampling yield a unique approximation to the expected
|
1363 |
+
# linspace CDF
|
1364 |
+
assert len(np.unique(inf_norm_arr)) == len(inf_norm_arr)
|
1365 |
+
|
1366 |
+
# sparse support
|
1367 |
+
|
1368 |
+
X = sparse.rand(n_samples, 1, density=0.99, format="csc", random_state=0)
|
1369 |
+
inf_norm_arr = []
|
1370 |
+
for random_state in range(ROUND):
|
1371 |
+
transformer = QuantileTransformer(
|
1372 |
+
random_state=random_state,
|
1373 |
+
n_quantiles=n_quantiles,
|
1374 |
+
subsample=n_samples // 10,
|
1375 |
+
)
|
1376 |
+
transformer.fit(X)
|
1377 |
+
diff = np.linspace(0, 1, n_quantiles) - np.ravel(transformer.quantiles_)
|
1378 |
+
inf_norm = np.max(np.abs(diff))
|
1379 |
+
assert inf_norm < 1e-1
|
1380 |
+
inf_norm_arr.append(inf_norm)
|
1381 |
+
# each random subsampling yield a unique approximation to the expected
|
1382 |
+
# linspace CDF
|
1383 |
+
assert len(np.unique(inf_norm_arr)) == len(inf_norm_arr)
|
1384 |
+
|
1385 |
+
|
1386 |
+
@pytest.mark.parametrize("csc_container", CSC_CONTAINERS)
|
1387 |
+
def test_quantile_transform_sparse_toy(csc_container):
|
1388 |
+
X = np.array(
|
1389 |
+
[
|
1390 |
+
[0.0, 2.0, 0.0],
|
1391 |
+
[25.0, 4.0, 0.0],
|
1392 |
+
[50.0, 0.0, 2.6],
|
1393 |
+
[0.0, 0.0, 4.1],
|
1394 |
+
[0.0, 6.0, 0.0],
|
1395 |
+
[0.0, 8.0, 0.0],
|
1396 |
+
[75.0, 0.0, 2.3],
|
1397 |
+
[0.0, 10.0, 0.0],
|
1398 |
+
[0.0, 0.0, 9.5],
|
1399 |
+
[100.0, 0.0, 0.1],
|
1400 |
+
]
|
1401 |
+
)
|
1402 |
+
|
1403 |
+
X = csc_container(X)
|
1404 |
+
|
1405 |
+
transformer = QuantileTransformer(n_quantiles=10)
|
1406 |
+
transformer.fit(X)
|
1407 |
+
|
1408 |
+
X_trans = transformer.fit_transform(X)
|
1409 |
+
assert_array_almost_equal(np.min(X_trans.toarray(), axis=0), 0.0)
|
1410 |
+
assert_array_almost_equal(np.max(X_trans.toarray(), axis=0), 1.0)
|
1411 |
+
|
1412 |
+
X_trans_inv = transformer.inverse_transform(X_trans)
|
1413 |
+
assert_array_almost_equal(X.toarray(), X_trans_inv.toarray())
|
1414 |
+
|
1415 |
+
transformer_dense = QuantileTransformer(n_quantiles=10).fit(X.toarray())
|
1416 |
+
|
1417 |
+
X_trans = transformer_dense.transform(X)
|
1418 |
+
assert_array_almost_equal(np.min(X_trans.toarray(), axis=0), 0.0)
|
1419 |
+
assert_array_almost_equal(np.max(X_trans.toarray(), axis=0), 1.0)
|
1420 |
+
|
1421 |
+
X_trans_inv = transformer_dense.inverse_transform(X_trans)
|
1422 |
+
assert_array_almost_equal(X.toarray(), X_trans_inv.toarray())
|
1423 |
+
|
1424 |
+
|
1425 |
+
def test_quantile_transform_axis1():
|
1426 |
+
X = np.array([[0, 25, 50, 75, 100], [2, 4, 6, 8, 10], [2.6, 4.1, 2.3, 9.5, 0.1]])
|
1427 |
+
|
1428 |
+
X_trans_a0 = quantile_transform(X.T, axis=0, n_quantiles=5)
|
1429 |
+
X_trans_a1 = quantile_transform(X, axis=1, n_quantiles=5)
|
1430 |
+
assert_array_almost_equal(X_trans_a0, X_trans_a1.T)
|
1431 |
+
|
1432 |
+
|
1433 |
+
@pytest.mark.parametrize("csc_container", CSC_CONTAINERS)
|
1434 |
+
def test_quantile_transform_bounds(csc_container):
|
1435 |
+
# Lower and upper bounds are manually mapped. We checked that in the case
|
1436 |
+
# of a constant feature and binary feature, the bounds are properly mapped.
|
1437 |
+
X_dense = np.array([[0, 0], [0, 0], [1, 0]])
|
1438 |
+
X_sparse = csc_container(X_dense)
|
1439 |
+
|
1440 |
+
# check sparse and dense are consistent
|
1441 |
+
X_trans = QuantileTransformer(n_quantiles=3, random_state=0).fit_transform(X_dense)
|
1442 |
+
assert_array_almost_equal(X_trans, X_dense)
|
1443 |
+
X_trans_sp = QuantileTransformer(n_quantiles=3, random_state=0).fit_transform(
|
1444 |
+
X_sparse
|
1445 |
+
)
|
1446 |
+
assert_array_almost_equal(X_trans_sp.toarray(), X_dense)
|
1447 |
+
assert_array_almost_equal(X_trans, X_trans_sp.toarray())
|
1448 |
+
|
1449 |
+
# check the consistency of the bounds by learning on 1 matrix
|
1450 |
+
# and transforming another
|
1451 |
+
X = np.array([[0, 1], [0, 0.5], [1, 0]])
|
1452 |
+
X1 = np.array([[0, 0.1], [0, 0.5], [1, 0.1]])
|
1453 |
+
transformer = QuantileTransformer(n_quantiles=3).fit(X)
|
1454 |
+
X_trans = transformer.transform(X1)
|
1455 |
+
assert_array_almost_equal(X_trans, X1)
|
1456 |
+
|
1457 |
+
# check that values outside of the range learned will be mapped properly.
|
1458 |
+
X = np.random.random((1000, 1))
|
1459 |
+
transformer = QuantileTransformer()
|
1460 |
+
transformer.fit(X)
|
1461 |
+
assert transformer.transform([[-10]]) == transformer.transform([[np.min(X)]])
|
1462 |
+
assert transformer.transform([[10]]) == transformer.transform([[np.max(X)]])
|
1463 |
+
assert transformer.inverse_transform([[-10]]) == transformer.inverse_transform(
|
1464 |
+
[[np.min(transformer.references_)]]
|
1465 |
+
)
|
1466 |
+
assert transformer.inverse_transform([[10]]) == transformer.inverse_transform(
|
1467 |
+
[[np.max(transformer.references_)]]
|
1468 |
+
)
|
1469 |
+
|
1470 |
+
|
1471 |
+
def test_quantile_transform_and_inverse():
|
1472 |
+
X_1 = iris.data
|
1473 |
+
X_2 = np.array([[0.0], [BOUNDS_THRESHOLD / 10], [1.5], [2], [3], [3], [4]])
|
1474 |
+
for X in [X_1, X_2]:
|
1475 |
+
transformer = QuantileTransformer(n_quantiles=1000, random_state=0)
|
1476 |
+
X_trans = transformer.fit_transform(X)
|
1477 |
+
X_trans_inv = transformer.inverse_transform(X_trans)
|
1478 |
+
assert_array_almost_equal(X, X_trans_inv, decimal=9)
|
1479 |
+
|
1480 |
+
|
1481 |
+
def test_quantile_transform_nan():
|
1482 |
+
X = np.array([[np.nan, 0, 0, 1], [np.nan, np.nan, 0, 0.5], [np.nan, 1, 1, 0]])
|
1483 |
+
|
1484 |
+
transformer = QuantileTransformer(n_quantiles=10, random_state=42)
|
1485 |
+
transformer.fit_transform(X)
|
1486 |
+
|
1487 |
+
# check that the quantile of the first column is all NaN
|
1488 |
+
assert np.isnan(transformer.quantiles_[:, 0]).all()
|
1489 |
+
# all other column should not contain NaN
|
1490 |
+
assert not np.isnan(transformer.quantiles_[:, 1:]).any()
|
1491 |
+
|
1492 |
+
|
1493 |
+
@pytest.mark.parametrize("array_type", ["array", "sparse"])
|
1494 |
+
def test_quantile_transformer_sorted_quantiles(array_type):
|
1495 |
+
# Non-regression test for:
|
1496 |
+
# https://github.com/scikit-learn/scikit-learn/issues/15733
|
1497 |
+
# Taken from upstream bug report:
|
1498 |
+
# https://github.com/numpy/numpy/issues/14685
|
1499 |
+
X = np.array([0, 1, 1, 2, 2, 3, 3, 4, 5, 5, 1, 1, 9, 9, 9, 8, 8, 7] * 10)
|
1500 |
+
X = 0.1 * X.reshape(-1, 1)
|
1501 |
+
X = _convert_container(X, array_type)
|
1502 |
+
|
1503 |
+
n_quantiles = 100
|
1504 |
+
qt = QuantileTransformer(n_quantiles=n_quantiles).fit(X)
|
1505 |
+
|
1506 |
+
# Check that the estimated quantile thresholds are monotically
|
1507 |
+
# increasing:
|
1508 |
+
quantiles = qt.quantiles_[:, 0]
|
1509 |
+
assert len(quantiles) == 100
|
1510 |
+
assert all(np.diff(quantiles) >= 0)
|
1511 |
+
|
1512 |
+
|
1513 |
+
def test_robust_scaler_invalid_range():
|
1514 |
+
for range_ in [
|
1515 |
+
(-1, 90),
|
1516 |
+
(-2, -3),
|
1517 |
+
(10, 101),
|
1518 |
+
(100.5, 101),
|
1519 |
+
(90, 50),
|
1520 |
+
]:
|
1521 |
+
scaler = RobustScaler(quantile_range=range_)
|
1522 |
+
|
1523 |
+
with pytest.raises(ValueError, match=r"Invalid quantile range: \("):
|
1524 |
+
scaler.fit(iris.data)
|
1525 |
+
|
1526 |
+
|
1527 |
+
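
# scale() and StandardScaler share the same arithmetic. A minimal dense
# sketch of scale(X, with_mean=False) as exercised below, for illustration
# only; `_sketch_scale_no_centering` is a hypothetical helper:
def _sketch_scale_no_centering(X):
    X = np.asarray(X, dtype=float)
    std = X.std(axis=0)
    std[std == 0.0] = 1.0  # constant features are left untouched
    return X / std
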
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
|
1528 |
+
def test_scale_function_without_centering(csr_container):
|
1529 |
+
rng = np.random.RandomState(42)
|
1530 |
+
X = rng.randn(4, 5)
|
1531 |
+
X[:, 0] = 0.0 # first feature is always of zero
|
1532 |
+
X_csr = csr_container(X)
|
1533 |
+
|
1534 |
+
X_scaled = scale(X, with_mean=False)
|
1535 |
+
assert not np.any(np.isnan(X_scaled))
|
1536 |
+
|
1537 |
+
X_csr_scaled = scale(X_csr, with_mean=False)
|
1538 |
+
assert not np.any(np.isnan(X_csr_scaled.data))
|
1539 |
+
|
1540 |
+
# test csc has same outcome
|
1541 |
+
X_csc_scaled = scale(X_csr.tocsc(), with_mean=False)
|
1542 |
+
assert_array_almost_equal(X_scaled, X_csc_scaled.toarray())
|
1543 |
+
|
1544 |
+
# raises value error on axis != 0
|
1545 |
+
with pytest.raises(ValueError):
|
1546 |
+
scale(X_csr, with_mean=False, axis=1)
|
1547 |
+
|
1548 |
+
assert_array_almost_equal(
|
1549 |
+
X_scaled.mean(axis=0), [0.0, -0.01, 2.24, -0.35, -0.78], 2
|
1550 |
+
)
|
1551 |
+
assert_array_almost_equal(X_scaled.std(axis=0), [0.0, 1.0, 1.0, 1.0, 1.0])
|
1552 |
+
# Check that X has not been copied
|
1553 |
+
assert X_scaled is not X
|
1554 |
+
|
1555 |
+
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0)
|
1556 |
+
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
|
1557 |
+
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
|
1558 |
+
|
1559 |
+
# null scale
|
1560 |
+
X_csr_scaled = scale(X_csr, with_mean=False, with_std=False, copy=True)
|
1561 |
+
assert_array_almost_equal(X_csr.toarray(), X_csr_scaled.toarray())
|
1562 |
+
|
1563 |
+
|
1564 |
+
def test_robust_scale_axis1():
|
1565 |
+
X = iris.data
|
1566 |
+
X_trans = robust_scale(X, axis=1)
|
1567 |
+
assert_array_almost_equal(np.median(X_trans, axis=1), 0)
|
1568 |
+
q = np.percentile(X_trans, q=(25, 75), axis=1)
|
1569 |
+
iqr = q[1] - q[0]
|
1570 |
+
assert_array_almost_equal(iqr, 1)
|
1571 |
+
|
1572 |
+
|
1573 |
+
def test_robust_scale_1d_array():
|
1574 |
+
X = iris.data[:, 1]
|
1575 |
+
X_trans = robust_scale(X)
|
1576 |
+
assert_array_almost_equal(np.median(X_trans), 0)
|
1577 |
+
q = np.percentile(X_trans, q=(25, 75))
|
1578 |
+
iqr = q[1] - q[0]
|
1579 |
+
assert_array_almost_equal(iqr, 1)
|
1580 |
+
|
1581 |
+
|
1582 |
+
def test_robust_scaler_zero_variance_features():
|
1583 |
+
# Check RobustScaler on toy data with zero variance features
|
1584 |
+
X = [[0.0, 1.0, +0.5], [0.0, 1.0, -0.1], [0.0, 1.0, +1.1]]
|
1585 |
+
|
1586 |
+
scaler = RobustScaler()
|
1587 |
+
X_trans = scaler.fit_transform(X)
|
1588 |
+
|
1589 |
+
# NOTE: for such a small sample size, what we expect in the third column
|
1590 |
+
# depends HEAVILY on the method used to calculate quantiles. The values
|
1591 |
+
# here were calculated to fit the quantiles produces by np.percentile
|
1592 |
+
# using numpy 1.9 Calculating quantiles with
|
1593 |
+
# scipy.stats.mstats.scoreatquantile or scipy.stats.mstats.mquantiles
|
1594 |
+
# would yield very different results!
|
1595 |
+
X_expected = [[0.0, 0.0, +0.0], [0.0, 0.0, -1.0], [0.0, 0.0, +1.0]]
|
1596 |
+
assert_array_almost_equal(X_trans, X_expected)
|
1597 |
+
X_trans_inv = scaler.inverse_transform(X_trans)
|
1598 |
+
assert_array_almost_equal(X, X_trans_inv)
|
1599 |
+
|
1600 |
+
# make sure new data gets transformed correctly
|
1601 |
+
X_new = [[+0.0, 2.0, 0.5], [-1.0, 1.0, 0.0], [+0.0, 1.0, 1.5]]
|
1602 |
+
X_trans_new = scaler.transform(X_new)
|
1603 |
+
X_expected_new = [[+0.0, 1.0, +0.0], [-1.0, 0.0, -0.83333], [+0.0, 0.0, +1.66667]]
|
1604 |
+
assert_array_almost_equal(X_trans_new, X_expected_new, decimal=3)
|
1605 |
+
|
1606 |
+
|
1607 |
+
def test_robust_scaler_unit_variance():
|
1608 |
+
# Check RobustScaler with unit_variance=True on standard normal data with
|
1609 |
+
# outliers
|
1610 |
+
rng = np.random.RandomState(42)
|
1611 |
+
X = rng.randn(1000000, 1)
|
1612 |
+
X_with_outliers = np.vstack([X, np.ones((100, 1)) * 100, np.ones((100, 1)) * -100])
|
1613 |
+
|
1614 |
+
quantile_range = (1, 99)
|
1615 |
+
robust_scaler = RobustScaler(quantile_range=quantile_range, unit_variance=True).fit(
|
1616 |
+
X_with_outliers
|
1617 |
+
)
|
1618 |
+
X_trans = robust_scaler.transform(X)
|
1619 |
+
|
1620 |
+
assert robust_scaler.center_ == pytest.approx(0, abs=1e-3)
|
1621 |
+
assert robust_scaler.scale_ == pytest.approx(1, abs=1e-2)
|
1622 |
+
assert X_trans.std() == pytest.approx(1, abs=1e-2)
|
1623 |
+
|
1624 |
+
|
1625 |
+
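
# MaxAbsScaler, tested below, divides each feature by its maximum absolute
# value, so data maps into [-1, 1] and sparsity is preserved (no centering).
# A minimal sketch, for illustration only; `_sketch_maxabs` is hypothetical:
def _sketch_maxabs(X):
    X = np.asarray(X, dtype=float)
    max_abs = np.abs(X).max(axis=0)
    max_abs[max_abs == 0.0] = 1.0  # all-zero features stay zero
    return X / max_abs
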
@pytest.mark.parametrize("sparse_container", CSC_CONTAINERS + CSR_CONTAINERS)
|
1626 |
+
def test_maxabs_scaler_zero_variance_features(sparse_container):
|
1627 |
+
# Check MaxAbsScaler on toy data with zero variance features
|
1628 |
+
X = [[0.0, 1.0, +0.5], [0.0, 1.0, -0.3], [0.0, 1.0, +1.5], [0.0, 0.0, +0.0]]
|
1629 |
+
|
1630 |
+
scaler = MaxAbsScaler()
|
1631 |
+
X_trans = scaler.fit_transform(X)
|
1632 |
+
X_expected = [
|
1633 |
+
[0.0, 1.0, 1.0 / 3.0],
|
1634 |
+
[0.0, 1.0, -0.2],
|
1635 |
+
[0.0, 1.0, 1.0],
|
1636 |
+
[0.0, 0.0, 0.0],
|
1637 |
+
]
|
1638 |
+
assert_array_almost_equal(X_trans, X_expected)
|
1639 |
+
X_trans_inv = scaler.inverse_transform(X_trans)
|
1640 |
+
assert_array_almost_equal(X, X_trans_inv)
|
1641 |
+
|
1642 |
+
# make sure new data gets transformed correctly
|
1643 |
+
X_new = [[+0.0, 2.0, 0.5], [-1.0, 1.0, 0.0], [+0.0, 1.0, 1.5]]
|
1644 |
+
X_trans_new = scaler.transform(X_new)
|
1645 |
+
X_expected_new = [[+0.0, 2.0, 1.0 / 3.0], [-1.0, 1.0, 0.0], [+0.0, 1.0, 1.0]]
|
1646 |
+
|
1647 |
+
assert_array_almost_equal(X_trans_new, X_expected_new, decimal=2)
|
1648 |
+
|
1649 |
+
# function interface
|
1650 |
+
X_trans = maxabs_scale(X)
|
1651 |
+
assert_array_almost_equal(X_trans, X_expected)
|
1652 |
+
|
1653 |
+
# sparse data
|
1654 |
+
X_sparse = sparse_container(X)
|
1655 |
+
X_trans_sparse = scaler.fit_transform(X_sparse)
|
1656 |
+
X_expected = [
|
1657 |
+
[0.0, 1.0, 1.0 / 3.0],
|
1658 |
+
[0.0, 1.0, -0.2],
|
1659 |
+
[0.0, 1.0, 1.0],
|
1660 |
+
[0.0, 0.0, 0.0],
|
1661 |
+
]
|
1662 |
+
assert_array_almost_equal(X_trans_sparse.toarray(), X_expected)
|
1663 |
+
X_trans_sparse_inv = scaler.inverse_transform(X_trans_sparse)
|
1664 |
+
assert_array_almost_equal(X, X_trans_sparse_inv.toarray())
|
1665 |
+
|
1666 |
+
|
1667 |
+
def test_maxabs_scaler_large_negative_value():
|
1668 |
+
# Check MaxAbsScaler on toy data with a large negative value
|
1669 |
+
X = [
|
1670 |
+
[0.0, 1.0, +0.5, -1.0],
|
1671 |
+
[0.0, 1.0, -0.3, -0.5],
|
1672 |
+
[0.0, 1.0, -100.0, 0.0],
|
1673 |
+
[0.0, 0.0, +0.0, -2.0],
|
1674 |
+
]
|
1675 |
+
|
1676 |
+
scaler = MaxAbsScaler()
|
1677 |
+
X_trans = scaler.fit_transform(X)
|
1678 |
+
X_expected = [
|
1679 |
+
[0.0, 1.0, 0.005, -0.5],
|
1680 |
+
[0.0, 1.0, -0.003, -0.25],
|
1681 |
+
[0.0, 1.0, -1.0, 0.0],
|
1682 |
+
[0.0, 0.0, 0.0, -1.0],
|
1683 |
+
]
|
1684 |
+
assert_array_almost_equal(X_trans, X_expected)
|
1685 |
+
|
1686 |
+
|
1687 |
+
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
|
1688 |
+
def test_maxabs_scaler_transform_one_row_csr(csr_container):
|
1689 |
+
# Check MaxAbsScaler on transforming csr matrix with one row
|
1690 |
+
X = csr_container([[0.5, 1.0, 1.0]])
|
1691 |
+
scaler = MaxAbsScaler()
|
1692 |
+
scaler = scaler.fit(X)
|
1693 |
+
X_trans = scaler.transform(X)
|
1694 |
+
X_expected = csr_container([[1.0, 1.0, 1.0]])
|
1695 |
+
assert_array_almost_equal(X_trans.toarray(), X_expected.toarray())
|
1696 |
+
X_scaled_back = scaler.inverse_transform(X_trans)
|
1697 |
+
assert_array_almost_equal(X.toarray(), X_scaled_back.toarray())
|
1698 |
+
|
1699 |
+
|
1700 |
+
def test_maxabs_scaler_1d():
|
1701 |
+
# Test scaling of dataset along single axis
|
1702 |
+
for X in [X_1row, X_1col, X_list_1row, X_list_1row]:
|
1703 |
+
scaler = MaxAbsScaler(copy=True)
|
1704 |
+
X_scaled = scaler.fit(X).transform(X)
|
1705 |
+
|
1706 |
+
if isinstance(X, list):
|
1707 |
+
X = np.array(X) # cast only after scaling done
|
1708 |
+
|
1709 |
+
if _check_dim_1axis(X) == 1:
|
1710 |
+
assert_array_almost_equal(np.abs(X_scaled.max(axis=0)), np.ones(n_features))
|
1711 |
+
else:
|
1712 |
+
assert_array_almost_equal(np.abs(X_scaled.max(axis=0)), 1.0)
|
1713 |
+
assert scaler.n_samples_seen_ == X.shape[0]
|
1714 |
+
|
1715 |
+
# check inverse transform
|
1716 |
+
X_scaled_back = scaler.inverse_transform(X_scaled)
|
1717 |
+
assert_array_almost_equal(X_scaled_back, X)
|
1718 |
+
|
1719 |
+
# Constant feature
|
1720 |
+
X = np.ones((5, 1))
|
1721 |
+
scaler = MaxAbsScaler()
|
1722 |
+
X_scaled = scaler.fit(X).transform(X)
|
1723 |
+
assert_array_almost_equal(np.abs(X_scaled.max(axis=0)), 1.0)
|
1724 |
+
assert scaler.n_samples_seen_ == X.shape[0]
|
1725 |
+
|
1726 |
+
# function interface
|
1727 |
+
X_1d = X_1row.ravel()
|
1728 |
+
max_abs = np.abs(X_1d).max()
|
1729 |
+
assert_array_almost_equal(X_1d / max_abs, maxabs_scale(X_1d, copy=True))
|
1730 |
+
|
1731 |
+
|
1732 |
+
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
|
1733 |
+
def test_maxabs_scaler_partial_fit(csr_container):
|
1734 |
+
# Test if partial_fit run over many batches of size 1 and 50
|
1735 |
+
# gives the same results as fit
|
1736 |
+
X = X_2d[:100, :]
|
1737 |
+
n = X.shape[0]
|
1738 |
+
|
1739 |
+
for chunk_size in [1, 2, 50, n, n + 42]:
|
1740 |
+
# Test mean at the end of the process
|
1741 |
+
scaler_batch = MaxAbsScaler().fit(X)
|
1742 |
+
|
1743 |
+
scaler_incr = MaxAbsScaler()
|
1744 |
+
scaler_incr_csr = MaxAbsScaler()
|
1745 |
+
scaler_incr_csc = MaxAbsScaler()
|
1746 |
+
for batch in gen_batches(n, chunk_size):
|
1747 |
+
scaler_incr = scaler_incr.partial_fit(X[batch])
|
1748 |
+
X_csr = csr_container(X[batch])
|
1749 |
+
scaler_incr_csr = scaler_incr_csr.partial_fit(X_csr)
|
1750 |
+
X_csc = csr_container(X[batch])
|
1751 |
+
scaler_incr_csc = scaler_incr_csc.partial_fit(X_csc)
|
1752 |
+
|
1753 |
+
assert_array_almost_equal(scaler_batch.max_abs_, scaler_incr.max_abs_)
|
1754 |
+
assert_array_almost_equal(scaler_batch.max_abs_, scaler_incr_csr.max_abs_)
|
1755 |
+
assert_array_almost_equal(scaler_batch.max_abs_, scaler_incr_csc.max_abs_)
|
1756 |
+
assert scaler_batch.n_samples_seen_ == scaler_incr.n_samples_seen_
|
1757 |
+
assert scaler_batch.n_samples_seen_ == scaler_incr_csr.n_samples_seen_
|
1758 |
+
assert scaler_batch.n_samples_seen_ == scaler_incr_csc.n_samples_seen_
|
1759 |
+
assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_)
|
1760 |
+
assert_array_almost_equal(scaler_batch.scale_, scaler_incr_csr.scale_)
|
1761 |
+
assert_array_almost_equal(scaler_batch.scale_, scaler_incr_csc.scale_)
|
1762 |
+
assert_array_almost_equal(scaler_batch.transform(X), scaler_incr.transform(X))
|
1763 |
+
|
1764 |
+
# Test std after 1 step
|
1765 |
+
batch0 = slice(0, chunk_size)
|
1766 |
+
scaler_batch = MaxAbsScaler().fit(X[batch0])
|
1767 |
+
scaler_incr = MaxAbsScaler().partial_fit(X[batch0])
|
1768 |
+
|
1769 |
+
assert_array_almost_equal(scaler_batch.max_abs_, scaler_incr.max_abs_)
|
1770 |
+
assert scaler_batch.n_samples_seen_ == scaler_incr.n_samples_seen_
|
1771 |
+
assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_)
|
1772 |
+
assert_array_almost_equal(scaler_batch.transform(X), scaler_incr.transform(X))
|
1773 |
+
|
1774 |
+
# Test std until the end of partial fits, and
|
1775 |
+
scaler_batch = MaxAbsScaler().fit(X)
|
1776 |
+
scaler_incr = MaxAbsScaler() # Clean estimator
|
1777 |
+
for i, batch in enumerate(gen_batches(n, chunk_size)):
|
1778 |
+
scaler_incr = scaler_incr.partial_fit(X[batch])
|
1779 |
+
assert_correct_incr(
|
1780 |
+
i,
|
1781 |
+
batch_start=batch.start,
|
1782 |
+
batch_stop=batch.stop,
|
1783 |
+
n=n,
|
1784 |
+
chunk_size=chunk_size,
|
1785 |
+
n_samples_seen=scaler_incr.n_samples_seen_,
|
1786 |
+
)
|
1787 |
+
|
1788 |
+
|
1789 |
+
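
# The Normalizer tests below act on rows, not columns: every sample is
# rescaled to unit norm and all-zero rows are left untouched. A minimal
# dense sketch, for illustration only; `_sketch_row_normalize` is a
# hypothetical helper:
def _sketch_row_normalize(X, norm="l2"):
    X = np.asarray(X, dtype=float)
    if norm == "l1":
        norms = np.abs(X).sum(axis=1)
    elif norm == "l2":
        norms = np.sqrt((X**2).sum(axis=1))
    else:  # "max"
        norms = np.abs(X).max(axis=1)
    norms[norms == 0.0] = 1.0  # keep all-zero rows as-is
    return X / norms[:, np.newaxis]
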
def check_normalizer(norm, X_norm):
    """
    Convenient checking function for `test_normalizer_l1_l2_max` and
    `test_normalizer_l1_l2_max_non_csr`
    """
    if norm == "l1":
        row_sums = np.abs(X_norm).sum(axis=1)
        for i in range(3):
            assert_almost_equal(row_sums[i], 1.0)
        assert_almost_equal(row_sums[3], 0.0)
    elif norm == "l2":
        for i in range(3):
            assert_almost_equal(la.norm(X_norm[i]), 1.0)
        assert_almost_equal(la.norm(X_norm[3]), 0.0)
    elif norm == "max":
        row_maxs = abs(X_norm).max(axis=1)
        for i in range(3):
            assert_almost_equal(row_maxs[i], 1.0)
        assert_almost_equal(row_maxs[3], 0.0)


@pytest.mark.parametrize("norm", ["l1", "l2", "max"])
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_normalizer_l1_l2_max(norm, csr_container):
    rng = np.random.RandomState(0)
    X_dense = rng.randn(4, 5)
    X_sparse_unpruned = csr_container(X_dense)

    # set the row number 3 to zero
    X_dense[3, :] = 0.0

    # set the row number 3 to zero without pruning (can happen in real life)
    indptr_3 = X_sparse_unpruned.indptr[3]
    indptr_4 = X_sparse_unpruned.indptr[4]
    X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0

    # build the pruned variant using the regular constructor
    X_sparse_pruned = csr_container(X_dense)

    # check inputs that support the no-copy optimization
    for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
        normalizer = Normalizer(norm=norm, copy=True)
        X_norm1 = normalizer.transform(X)
        assert X_norm1 is not X
        X_norm1 = toarray(X_norm1)

        normalizer = Normalizer(norm=norm, copy=False)
        X_norm2 = normalizer.transform(X)
        assert X_norm2 is X
        X_norm2 = toarray(X_norm2)

        for X_norm in (X_norm1, X_norm2):
            check_normalizer(norm, X_norm)


@pytest.mark.parametrize("norm", ["l1", "l2", "max"])
@pytest.mark.parametrize(
    "sparse_container", COO_CONTAINERS + CSC_CONTAINERS + LIL_CONTAINERS
)
def test_normalizer_l1_l2_max_non_csr(norm, sparse_container):
    rng = np.random.RandomState(0)
    X_dense = rng.randn(4, 5)

    # set the row number 3 to zero
    X_dense[3, :] = 0.0

    X = sparse_container(X_dense)
    X_norm = Normalizer(norm=norm, copy=False).transform(X)

    assert X_norm is not X
    assert sparse.issparse(X_norm) and X_norm.format == "csr"

    X_norm = toarray(X_norm)
    check_normalizer(norm, X_norm)


@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_normalizer_max_sign(csr_container):
    # check that we normalize by a positive number even for negative data
    rng = np.random.RandomState(0)
    X_dense = rng.randn(4, 5)
    # set the row number 3 to zero
    X_dense[3, :] = 0.0
    # check for mixed data where the value with
    # largest magnitude is negative
    X_dense[2, abs(X_dense[2, :]).argmax()] *= -1
    X_all_neg = -np.abs(X_dense)
    X_all_neg_sparse = csr_container(X_all_neg)

    for X in (X_dense, X_all_neg, X_all_neg_sparse):
        normalizer = Normalizer(norm="max")
        X_norm = normalizer.transform(X)
        assert X_norm is not X
        X_norm = toarray(X_norm)
        assert_array_equal(np.sign(X_norm), np.sign(toarray(X)))


@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_normalize(csr_container):
    # Test normalize function
    # Only tests functionality not used by the tests for Normalizer.
    X = np.random.RandomState(37).randn(3, 2)
    assert_array_equal(normalize(X, copy=False), normalize(X.T, axis=0, copy=False).T)

    rs = np.random.RandomState(0)
    X_dense = rs.randn(10, 5)
    X_sparse = csr_container(X_dense)
    ones = np.ones((10))
    for X in (X_dense, X_sparse):
        for dtype in (np.float32, np.float64):
            for norm in ("l1", "l2"):
                X = X.astype(dtype)
                X_norm = normalize(X, norm=norm)
                assert X_norm.dtype == dtype

                X_norm = toarray(X_norm)
                if norm == "l1":
                    row_sums = np.abs(X_norm).sum(axis=1)
                else:
                    X_norm_squared = X_norm**2
                    row_sums = X_norm_squared.sum(axis=1)

                assert_array_almost_equal(row_sums, ones)

    # Test return_norm
    X_dense = np.array([[3.0, 0, 4.0], [1.0, 0.0, 0.0], [2.0, 3.0, 0.0]])
    for norm in ("l1", "l2", "max"):
        _, norms = normalize(X_dense, norm=norm, return_norm=True)
        if norm == "l1":
            assert_array_almost_equal(norms, np.array([7.0, 1.0, 5.0]))
        elif norm == "l2":
            assert_array_almost_equal(norms, np.array([5.0, 1.0, 3.60555127]))
        else:
            assert_array_almost_equal(norms, np.array([4.0, 1.0, 3.0]))

    X_sparse = csr_container(X_dense)
    for norm in ("l1", "l2"):
        with pytest.raises(NotImplementedError):
            normalize(X_sparse, norm=norm, return_norm=True)
    _, norms = normalize(X_sparse, norm="max", return_norm=True)
    assert_array_almost_equal(norms, np.array([4.0, 1.0, 3.0]))
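
# Binarizer, tested below, thresholds elementwise: values strictly greater
# than `threshold` become 1, the rest 0 -- roughly np.where(X > t, 1, 0).
# The caveat (an inference, stated here as an assumption) is that a negative
# threshold would force explicit ones at the implicit zeros of a sparse
# matrix, which is why the sparse case raises the ValueError checked below.
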
@pytest.mark.parametrize(
    "constructor", [np.array, list] + CSC_CONTAINERS + CSR_CONTAINERS
)
def test_binarizer(constructor):
    X_ = np.array([[1, 0, 5], [2, 3, -1]])
    X = constructor(X_.copy())

    binarizer = Binarizer(threshold=2.0, copy=True)
    X_bin = toarray(binarizer.transform(X))
    assert np.sum(X_bin == 0) == 4
    assert np.sum(X_bin == 1) == 2
    X_bin = binarizer.transform(X)
    assert sparse.issparse(X) == sparse.issparse(X_bin)

    binarizer = Binarizer(copy=True).fit(X)
    X_bin = toarray(binarizer.transform(X))
    assert X_bin is not X
    assert np.sum(X_bin == 0) == 2
    assert np.sum(X_bin == 1) == 4

    binarizer = Binarizer(copy=True)
    X_bin = binarizer.transform(X)
    assert X_bin is not X
    X_bin = toarray(X_bin)
    assert np.sum(X_bin == 0) == 2
    assert np.sum(X_bin == 1) == 4

    binarizer = Binarizer(copy=False)
    X_bin = binarizer.transform(X)
    if constructor is not list:
        assert X_bin is X

    binarizer = Binarizer(copy=False)
    X_float = np.array([[1, 0, 5], [2, 3, -1]], dtype=np.float64)
    X_bin = binarizer.transform(X_float)
    if constructor is not list:
        assert X_bin is X_float

    X_bin = toarray(X_bin)
    assert np.sum(X_bin == 0) == 2
    assert np.sum(X_bin == 1) == 4

    binarizer = Binarizer(threshold=-0.5, copy=True)
    if constructor in (np.array, list):
        X = constructor(X_.copy())

        X_bin = toarray(binarizer.transform(X))
        assert np.sum(X_bin == 0) == 1
        assert np.sum(X_bin == 1) == 5
        X_bin = binarizer.transform(X)

    # Cannot use threshold < 0 for sparse
    if constructor in CSC_CONTAINERS:
        with pytest.raises(ValueError):
            binarizer.transform(constructor(X))
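
# The kernel-centering identity asserted in the tests below can be written
# compactly as K_centered = (I - 1_M) K (I - 1_M), with 1_M the constant
# matrix filled with 1/M. A minimal sketch for the fit-time kernel, for
# illustration only; `_sketch_center_kernel` is a hypothetical helper:
def _sketch_center_kernel(K):
    M = K.shape[0]
    one_M = np.full((M, M), 1.0 / M)
    identity = np.eye(M)
    return (identity - one_M) @ K @ (identity - one_M)
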
def test_center_kernel():
    # Test that KernelCenterer is equivalent to StandardScaler
    # in feature space
    rng = np.random.RandomState(0)
    X_fit = rng.random_sample((5, 4))
    scaler = StandardScaler(with_std=False)
    scaler.fit(X_fit)
    X_fit_centered = scaler.transform(X_fit)
    K_fit = np.dot(X_fit, X_fit.T)

    # center fit-time matrix
    centerer = KernelCenterer()
    K_fit_centered = np.dot(X_fit_centered, X_fit_centered.T)
    K_fit_centered2 = centerer.fit_transform(K_fit)
    assert_array_almost_equal(K_fit_centered, K_fit_centered2)

    # center predict-time matrix
    X_pred = rng.random_sample((2, 4))
    K_pred = np.dot(X_pred, X_fit.T)
    X_pred_centered = scaler.transform(X_pred)
    K_pred_centered = np.dot(X_pred_centered, X_fit_centered.T)
    K_pred_centered2 = centerer.transform(K_pred)
    assert_array_almost_equal(K_pred_centered, K_pred_centered2)

    # check the results coherence with the method proposed in:
    # B. Schölkopf, A. Smola, and K.R. Müller,
    # "Nonlinear component analysis as a kernel eigenvalue problem"
    # equation (B.3)

    # K_centered3 = (I - 1_M) K (I - 1_M)
    #             = K - 1_M K - K 1_M + 1_M K 1_M
    ones_M = np.ones_like(K_fit) / K_fit.shape[0]
    K_fit_centered3 = K_fit - ones_M @ K_fit - K_fit @ ones_M + ones_M @ K_fit @ ones_M
    assert_allclose(K_fit_centered, K_fit_centered3)

    # K_test_centered3 = (K_test - 1'_M K)(I - 1_M)
    #                  = K_test - 1'_M K - K_test 1_M + 1'_M K 1_M
    ones_prime_M = np.ones_like(K_pred) / K_fit.shape[0]
    K_pred_centered3 = (
        K_pred - ones_prime_M @ K_fit - K_pred @ ones_M + ones_prime_M @ K_fit @ ones_M
    )
    assert_allclose(K_pred_centered, K_pred_centered3)


def test_kernelcenterer_non_linear_kernel():
    """Check kernel centering for non-linear kernel."""
    rng = np.random.RandomState(0)
    X, X_test = rng.randn(100, 50), rng.randn(20, 50)

    def phi(X):
        """Our mapping function phi."""
        return np.vstack(
            [
                np.clip(X, a_min=0, a_max=None),
                -np.clip(X, a_min=None, a_max=0),
            ]
        )

    phi_X = phi(X)
    phi_X_test = phi(X_test)

    # center the projection
    scaler = StandardScaler(with_std=False)
    phi_X_center = scaler.fit_transform(phi_X)
    phi_X_test_center = scaler.transform(phi_X_test)

    # create the different kernels
    K = phi_X @ phi_X.T
    K_test = phi_X_test @ phi_X.T
    K_center = phi_X_center @ phi_X_center.T
    K_test_center = phi_X_test_center @ phi_X_center.T

    kernel_centerer = KernelCenterer()
    kernel_centerer.fit(K)

    assert_allclose(kernel_centerer.transform(K), K_center)
    assert_allclose(kernel_centerer.transform(K_test), K_test_center)

    # check the results coherence with the method proposed in:
    # B. Schölkopf, A. Smola, and K.R. Müller,
    # "Nonlinear component analysis as a kernel eigenvalue problem"
    # equation (B.3)

    # K_centered = (I - 1_M) K (I - 1_M)
    #            = K - 1_M K - K 1_M + 1_M K 1_M
    ones_M = np.ones_like(K) / K.shape[0]
    K_centered = K - ones_M @ K - K @ ones_M + ones_M @ K @ ones_M
    assert_allclose(kernel_centerer.transform(K), K_centered)

    # K_test_centered = (K_test - 1'_M K)(I - 1_M)
    #                 = K_test - 1'_M K - K_test 1_M + 1'_M K 1_M
    ones_prime_M = np.ones_like(K_test) / K.shape[0]
    K_test_centered = (
        K_test - ones_prime_M @ K - K_test @ ones_M + ones_prime_M @ K @ ones_M
    )
    assert_allclose(kernel_centerer.transform(K_test), K_test_centered)


def test_cv_pipeline_precomputed():
    # Cross-validate a regression on four coplanar points with the same
    # value. Use precomputed kernel to ensure Pipeline with KernelCenterer
    # is treated as a pairwise operation.
    X = np.array([[3, 0, 0], [0, 3, 0], [0, 0, 3], [1, 1, 1]])
    y_true = np.ones((4,))
    K = X.dot(X.T)
    kcent = KernelCenterer()
    pipeline = Pipeline([("kernel_centerer", kcent), ("svr", SVR())])

    # did the pipeline set the pairwise attribute?
    assert pipeline._get_tags()["pairwise"]

    # test cross-validation, score should be almost perfect
    # NB: this test is pretty vacuous -- it's mainly to test integration
    # of Pipeline and KernelCenterer
    y_pred = cross_val_predict(pipeline, K, y_true, cv=2)
    assert_array_almost_equal(y_true, y_pred)


def test_fit_transform():
    rng = np.random.RandomState(0)
    X = rng.random_sample((5, 4))
    for obj in (StandardScaler(), Normalizer(), Binarizer()):
        X_transformed = obj.fit(X).transform(X)
        X_transformed2 = obj.fit_transform(X)
        assert_array_equal(X_transformed, X_transformed2)


def test_add_dummy_feature():
    X = [[1, 0], [0, 1], [0, 1]]
    X = add_dummy_feature(X)
    assert_array_equal(X, [[1, 1, 0], [1, 0, 1], [1, 0, 1]])


@pytest.mark.parametrize(
    "sparse_container", COO_CONTAINERS + CSC_CONTAINERS + CSR_CONTAINERS
)
def test_add_dummy_feature_sparse(sparse_container):
    X = sparse_container([[1, 0], [0, 1], [0, 1]])
    desired_format = X.format
    X = add_dummy_feature(X)
    assert sparse.issparse(X) and X.format == desired_format, X
    assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])


def test_fit_cold_start():
    X = iris.data
    X_2d = X[:, :2]

    # Scalers that have a partial_fit method
    scalers = [
        StandardScaler(with_mean=False, with_std=False),
        MinMaxScaler(),
        MaxAbsScaler(),
    ]

    for scaler in scalers:
        scaler.fit_transform(X)
        # with a different shape, this may break the scaler unless the internal
        # state is reset
        scaler.fit_transform(X_2d)


@pytest.mark.parametrize("method", ["box-cox", "yeo-johnson"])
def test_power_transformer_notfitted(method):
    pt = PowerTransformer(method=method)
    X = np.abs(X_1col)
    with pytest.raises(NotFittedError):
        pt.transform(X)
    with pytest.raises(NotFittedError):
        pt.inverse_transform(X)


@pytest.mark.parametrize("method", ["box-cox", "yeo-johnson"])
@pytest.mark.parametrize("standardize", [True, False])
@pytest.mark.parametrize("X", [X_1col, X_2d])
def test_power_transformer_inverse(method, standardize, X):
    # Make sure we get the original input when applying transform and then
    # inverse transform
    X = np.abs(X) if method == "box-cox" else X
    pt = PowerTransformer(method=method, standardize=standardize)
    X_trans = pt.fit_transform(X)
    assert_almost_equal(X, pt.inverse_transform(X_trans))
def test_power_transformer_1d():
|
2174 |
+
X = np.abs(X_1col)
|
2175 |
+
|
2176 |
+
for standardize in [True, False]:
|
2177 |
+
pt = PowerTransformer(method="box-cox", standardize=standardize)
|
2178 |
+
|
2179 |
+
X_trans = pt.fit_transform(X)
|
2180 |
+
X_trans_func = power_transform(X, method="box-cox", standardize=standardize)
|
2181 |
+
|
2182 |
+
X_expected, lambda_expected = stats.boxcox(X.flatten())
|
2183 |
+
|
2184 |
+
if standardize:
|
2185 |
+
X_expected = scale(X_expected)
|
2186 |
+
|
2187 |
+
assert_almost_equal(X_expected.reshape(-1, 1), X_trans)
|
2188 |
+
assert_almost_equal(X_expected.reshape(-1, 1), X_trans_func)
|
2189 |
+
|
2190 |
+
assert_almost_equal(X, pt.inverse_transform(X_trans))
|
2191 |
+
assert_almost_equal(lambda_expected, pt.lambdas_[0])
|
2192 |
+
|
2193 |
+
assert len(pt.lambdas_) == X.shape[1]
|
2194 |
+
assert isinstance(pt.lambdas_, np.ndarray)
|
2195 |
+
|
2196 |
+
|
2197 |
+
def test_power_transformer_2d():
|
2198 |
+
X = np.abs(X_2d)
|
2199 |
+
|
2200 |
+
for standardize in [True, False]:
|
2201 |
+
pt = PowerTransformer(method="box-cox", standardize=standardize)
|
2202 |
+
|
2203 |
+
X_trans_class = pt.fit_transform(X)
|
2204 |
+
X_trans_func = power_transform(X, method="box-cox", standardize=standardize)
|
2205 |
+
|
2206 |
+
for X_trans in [X_trans_class, X_trans_func]:
|
2207 |
+
for j in range(X_trans.shape[1]):
|
2208 |
+
X_expected, lmbda = stats.boxcox(X[:, j].flatten())
|
2209 |
+
|
2210 |
+
if standardize:
|
2211 |
+
X_expected = scale(X_expected)
|
2212 |
+
|
2213 |
+
assert_almost_equal(X_trans[:, j], X_expected)
|
2214 |
+
assert_almost_equal(lmbda, pt.lambdas_[j])
|
2215 |
+
|
2216 |
+
# Test inverse transformation
|
2217 |
+
X_inv = pt.inverse_transform(X_trans)
|
2218 |
+
assert_array_almost_equal(X_inv, X)
|
2219 |
+
|
2220 |
+
assert len(pt.lambdas_) == X.shape[1]
|
2221 |
+
assert isinstance(pt.lambdas_, np.ndarray)
|
2222 |
+
|
2223 |
+
|
2224 |
+
def test_power_transformer_boxcox_strictly_positive_exception():
|
2225 |
+
# Exceptions should be raised for negative arrays and zero arrays when
|
2226 |
+
# method is boxcox
|
2227 |
+
|
2228 |
+
pt = PowerTransformer(method="box-cox")
|
2229 |
+
pt.fit(np.abs(X_2d))
|
2230 |
+
X_with_negatives = X_2d
|
2231 |
+
not_positive_message = "strictly positive"
|
2232 |
+
|
2233 |
+
with pytest.raises(ValueError, match=not_positive_message):
|
2234 |
+
pt.transform(X_with_negatives)
|
2235 |
+
|
2236 |
+
with pytest.raises(ValueError, match=not_positive_message):
|
2237 |
+
pt.fit(X_with_negatives)
|
2238 |
+
|
2239 |
+
with pytest.raises(ValueError, match=not_positive_message):
|
2240 |
+
power_transform(X_with_negatives, method="box-cox")
|
2241 |
+
|
2242 |
+
with pytest.raises(ValueError, match=not_positive_message):
|
2243 |
+
pt.transform(np.zeros(X_2d.shape))
|
2244 |
+
|
2245 |
+
with pytest.raises(ValueError, match=not_positive_message):
|
2246 |
+
pt.fit(np.zeros(X_2d.shape))
|
2247 |
+
|
2248 |
+
with pytest.raises(ValueError, match=not_positive_message):
|
2249 |
+
power_transform(np.zeros(X_2d.shape), method="box-cox")
|
2250 |
+
|
2251 |
+
|
2252 |
+
@pytest.mark.parametrize("X", [X_2d, np.abs(X_2d), -np.abs(X_2d), np.zeros(X_2d.shape)])
|
2253 |
+
def test_power_transformer_yeojohnson_any_input(X):
|
2254 |
+
# Yeo-Johnson method should support any kind of input
|
2255 |
+
power_transform(X, method="yeo-johnson")
|
2256 |
+
|
2257 |
+
|
2258 |
+
@pytest.mark.parametrize("method", ["box-cox", "yeo-johnson"])
|
2259 |
+
def test_power_transformer_shape_exception(method):
|
2260 |
+
pt = PowerTransformer(method=method)
|
2261 |
+
X = np.abs(X_2d)
|
2262 |
+
pt.fit(X)
|
2263 |
+
|
2264 |
+
# Exceptions should be raised for arrays with different num_columns
|
2265 |
+
# than during fitting
|
2266 |
+
wrong_shape_message = (
|
2267 |
+
r"X has \d+ features, but PowerTransformer is " r"expecting \d+ features"
|
2268 |
+
)
|
2269 |
+
|
2270 |
+
with pytest.raises(ValueError, match=wrong_shape_message):
|
2271 |
+
pt.transform(X[:, 0:1])
|
2272 |
+
|
2273 |
+
with pytest.raises(ValueError, match=wrong_shape_message):
|
2274 |
+
pt.inverse_transform(X[:, 0:1])
|
2275 |
+
|
2276 |
+
|
2277 |
+
def test_power_transformer_lambda_zero():
|
2278 |
+
pt = PowerTransformer(method="box-cox", standardize=False)
|
2279 |
+
X = np.abs(X_2d)[:, 0:1]
|
2280 |
+
|
2281 |
+
# Test the lambda = 0 case
|
2282 |
+
pt.lambdas_ = np.array([0])
|
2283 |
+
X_trans = pt.transform(X)
|
2284 |
+
assert_array_almost_equal(pt.inverse_transform(X_trans), X)
|
2285 |
+
|
2286 |
+
|
2287 |
+
def test_power_transformer_lambda_one():
|
2288 |
+
# Make sure lambda = 1 corresponds to the identity for yeo-johnson
|
2289 |
+
pt = PowerTransformer(method="yeo-johnson", standardize=False)
|
2290 |
+
X = np.abs(X_2d)[:, 0:1]
|
2291 |
+
|
2292 |
+
pt.lambdas_ = np.array([1])
|
2293 |
+
X_trans = pt.transform(X)
|
2294 |
+
assert_array_almost_equal(X_trans, X)
|
2295 |
+
|
2296 |
+
|
2297 |
+
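# Illustrative sketch (not part of the original diff): the closed form behind
# the lambda special cases above, y = (x**lmbda - 1) / lmbda for lmbda != 0
# and y = log(x) at lmbda = 0, checked against scipy.stats.boxcox; `np` and
# `stats` are this module's existing imports. Similarly, Yeo-Johnson with
# lmbda = 1 is the identity on nonnegative input: ((x + 1)**1 - 1) / 1 = x.
def _demo_box_cox_closed_form():
    x = np.array([0.5, 1.0, 2.0, 4.0])
    assert np.allclose((x**0.5 - 1.0) / 0.5, stats.boxcox(x, lmbda=0.5))
    assert np.allclose(np.log(x), stats.boxcox(x, lmbda=0.0))
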
@pytest.mark.parametrize(
    "method, lmbda",
    [
        ("box-cox", 0.1),
        ("box-cox", 0.5),
        ("yeo-johnson", 0.1),
        ("yeo-johnson", 0.5),
        ("yeo-johnson", 1.0),
    ],
)
def test_optimization_power_transformer(method, lmbda):
    # Test the optimization procedure:
    # - set a predefined value for lambda
    # - apply inverse_transform to a normal dist (we get X_inv)
    # - apply fit_transform to X_inv (we get X_inv_trans)
    # - check that X_inv_trans is roughly equal to X

    rng = np.random.RandomState(0)
    n_samples = 20000
    X = rng.normal(loc=0, scale=1, size=(n_samples, 1))

    pt = PowerTransformer(method=method, standardize=False)
    pt.lambdas_ = [lmbda]
    X_inv = pt.inverse_transform(X)

    pt = PowerTransformer(method=method, standardize=False)
    X_inv_trans = pt.fit_transform(X_inv)

    assert_almost_equal(0, np.linalg.norm(X - X_inv_trans) / n_samples, decimal=2)
    assert_almost_equal(0, X_inv_trans.mean(), decimal=1)
    assert_almost_equal(1, X_inv_trans.std(), decimal=1)


def test_yeo_johnson_darwin_example():
    # test from original paper "A new family of power transformations to
    # improve normality or symmetry" by Yeo and Johnson.
    X = [6.1, -8.4, 1.0, 2.0, 0.7, 2.9, 3.5, 5.1, 1.8, 3.6, 7.0, 3.0, 9.3, 7.5, -6.0]
    X = np.array(X).reshape(-1, 1)
    lmbda = PowerTransformer(method="yeo-johnson").fit(X).lambdas_
    assert np.allclose(lmbda, 1.305, atol=1e-3)


@pytest.mark.parametrize("method", ["box-cox", "yeo-johnson"])
def test_power_transformer_nans(method):
    # Make sure lambda estimation is not influenced by NaN values
    # and that transform() supports NaN silently

    X = np.abs(X_1col)
    pt = PowerTransformer(method=method)
    pt.fit(X)
    lmbda_no_nans = pt.lambdas_[0]

    # concat nans at the end and check lambda stays the same
    X = np.concatenate([X, np.full_like(X, np.nan)])
    X = shuffle(X, random_state=0)

    pt.fit(X)
    lmbda_nans = pt.lambdas_[0]

    assert_almost_equal(lmbda_no_nans, lmbda_nans, decimal=5)

    X_trans = pt.transform(X)
    assert_array_equal(np.isnan(X_trans), np.isnan(X))


@pytest.mark.parametrize("method", ["box-cox", "yeo-johnson"])
@pytest.mark.parametrize("standardize", [True, False])
def test_power_transformer_fit_transform(method, standardize):
    # check that fit_transform() and fit().transform() return the same values
    X = X_1col
    if method == "box-cox":
        X = np.abs(X)

    pt = PowerTransformer(method, standardize=standardize)
    assert_array_almost_equal(pt.fit(X).transform(X), pt.fit_transform(X))


@pytest.mark.parametrize("method", ["box-cox", "yeo-johnson"])
@pytest.mark.parametrize("standardize", [True, False])
def test_power_transformer_copy_True(method, standardize):
    # Check that neither fit, transform, fit_transform nor inverse_transform
    # modify X inplace when copy=True
    X = X_1col
    if method == "box-cox":
        X = np.abs(X)

    X_original = X.copy()
    assert X is not X_original  # sanity checks
    assert_array_almost_equal(X, X_original)

    pt = PowerTransformer(method, standardize=standardize, copy=True)

    pt.fit(X)
    assert_array_almost_equal(X, X_original)
    X_trans = pt.transform(X)
    assert X_trans is not X

    X_trans = pt.fit_transform(X)
    assert_array_almost_equal(X, X_original)
    assert X_trans is not X

    X_inv_trans = pt.inverse_transform(X_trans)
    assert X_trans is not X_inv_trans


@pytest.mark.parametrize("method", ["box-cox", "yeo-johnson"])
@pytest.mark.parametrize("standardize", [True, False])
def test_power_transformer_copy_False(method, standardize):
    # check that when copy=False fit doesn't change X inplace but transform,
    # fit_transform and inverse_transform do.
    X = X_1col
    if method == "box-cox":
        X = np.abs(X)

    X_original = X.copy()
    assert X is not X_original  # sanity checks
    assert_array_almost_equal(X, X_original)

    pt = PowerTransformer(method, standardize=standardize, copy=False)

    pt.fit(X)
    assert_array_almost_equal(X, X_original)  # fit didn't change X

    X_trans = pt.transform(X)
    assert X_trans is X

    if method == "box-cox":
        X = np.abs(X)
    X_trans = pt.fit_transform(X)
    assert X_trans is X

    X_inv_trans = pt.inverse_transform(X_trans)
    assert X_trans is X_inv_trans


def test_power_transformer_box_cox_raise_all_nans_col():
    """Check that box-cox raises an informative error when a column contains
    all nans.

    Non-regression test for gh-26303
    """
    X = rng.random_sample((4, 5))
    X[:, 0] = np.nan

    err_msg = "Column must not be all nan."

    pt = PowerTransformer(method="box-cox")
    with pytest.raises(ValueError, match=err_msg):
        pt.fit_transform(X)


@pytest.mark.parametrize(
    "X_2",
    [sparse.random(10, 1, density=0.8, random_state=0)]
    + [
        csr_container(np.full((10, 1), fill_value=np.nan))
        for csr_container in CSR_CONTAINERS
    ],
)
def test_standard_scaler_sparse_partial_fit_finite_variance(X_2):
    # non-regression test for:
    # https://github.com/scikit-learn/scikit-learn/issues/16448
    X_1 = sparse.random(5, 1, density=0.8)
    scaler = StandardScaler(with_mean=False)
    scaler.fit(X_1).partial_fit(X_2)
    assert np.isfinite(scaler.var_[0])


@pytest.mark.parametrize("feature_range", [(0, 1), (-10, 10)])
def test_minmax_scaler_clip(feature_range):
    # test behaviour of the parameter 'clip' in MinMaxScaler
    X = iris.data
    scaler = MinMaxScaler(feature_range=feature_range, clip=True).fit(X)
    X_min, X_max = np.min(X, axis=0), np.max(X, axis=0)
    X_test = [np.r_[X_min[:2] - 10, X_max[2:] + 10]]
    X_transformed = scaler.transform(X_test)
    assert_allclose(
        X_transformed,
        [[feature_range[0], feature_range[0], feature_range[1], feature_range[1]]],
    )

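# Illustrative sketch (not part of the original diff): clip=True clamps
# out-of-range test values to the fitted feature_range, shown here on
# hypothetical single-feature data using this module's existing imports.
def _demo_minmax_clip():
    scaler = MinMaxScaler(feature_range=(0, 1), clip=True)
    scaler.fit(np.array([[0.0], [5.0], [10.0]]))
    # -3 would map to -0.3 and 12 to 1.2; both are clamped to the range bounds
    assert_allclose(scaler.transform([[-3.0], [12.0]]), [[0.0], [1.0]])
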
def test_standard_scaler_raise_error_for_1d_input():
    """Check that `inverse_transform` from `StandardScaler` raises an error
    with 1D array.
    Non-regression test for:
    https://github.com/scikit-learn/scikit-learn/issues/19518
    """
    scaler = StandardScaler().fit(X_2d)
    err_msg = "Expected 2D array, got 1D array instead"
    with pytest.raises(ValueError, match=err_msg):
        scaler.inverse_transform(X_2d[:, 0])


def test_power_transformer_significantly_non_gaussian():
    """Check that significantly non-Gaussian data transforms correctly.

    For some explored lambdas, the transformed data may be constant and will
    be rejected. Non-regression test for
    https://github.com/scikit-learn/scikit-learn/issues/14959
    """

    X_non_gaussian = 1e6 * np.array(
        [0.6, 2.0, 3.0, 4.0] * 4 + [11, 12, 12, 16, 17, 20, 85, 90], dtype=np.float64
    ).reshape(-1, 1)
    pt = PowerTransformer()

    with warnings.catch_warnings():
        warnings.simplefilter("error", RuntimeWarning)
        X_trans = pt.fit_transform(X_non_gaussian)

    assert not np.any(np.isnan(X_trans))
    assert X_trans.mean() == pytest.approx(0.0)
    assert X_trans.std() == pytest.approx(1.0)
    assert X_trans.min() > -2
    assert X_trans.max() < 2


@pytest.mark.parametrize(
    "Transformer",
    [
        MinMaxScaler,
        MaxAbsScaler,
        RobustScaler,
        StandardScaler,
        QuantileTransformer,
        PowerTransformer,
    ],
)
def test_one_to_one_features(Transformer):
    """Check one-to-one transformers give correct feature names."""
    tr = Transformer().fit(iris.data)
    names_out = tr.get_feature_names_out(iris.feature_names)
    assert_array_equal(names_out, iris.feature_names)


@pytest.mark.parametrize(
    "Transformer",
    [
        MinMaxScaler,
        MaxAbsScaler,
        RobustScaler,
        StandardScaler,
        QuantileTransformer,
        PowerTransformer,
        Normalizer,
        Binarizer,
    ],
)
def test_one_to_one_features_pandas(Transformer):
    """Check one-to-one transformers give correct feature names."""
    pd = pytest.importorskip("pandas")

    df = pd.DataFrame(iris.data, columns=iris.feature_names)
    tr = Transformer().fit(df)

    names_out_df_default = tr.get_feature_names_out()
    assert_array_equal(names_out_df_default, iris.feature_names)

    names_out_df_valid_in = tr.get_feature_names_out(iris.feature_names)
    assert_array_equal(names_out_df_valid_in, iris.feature_names)

    msg = re.escape("input_features is not equal to feature_names_in_")
    with pytest.raises(ValueError, match=msg):
        invalid_names = list("abcd")
        tr.get_feature_names_out(invalid_names)


def test_kernel_centerer_feature_names_out():
    """Test that kernel centerer returns the expected `feature_names_out`."""

    rng = np.random.RandomState(0)
    X = rng.random_sample((6, 4))
    X_pairwise = linear_kernel(X)
    centerer = KernelCenterer().fit(X_pairwise)

    names_out = centerer.get_feature_names_out()
    samples_out2 = X_pairwise.shape[1]
    assert_array_equal(names_out, [f"kernelcenterer{i}" for i in range(samples_out2)])


@pytest.mark.parametrize("standardize", [True, False])
def test_power_transformer_constant_feature(standardize):
    """Check that PowerTransformer leaves constant features unchanged."""
    X = [[-2, 0, 2], [-2, 0, 2], [-2, 0, 2]]

    pt = PowerTransformer(method="yeo-johnson", standardize=standardize).fit(X)

    assert_allclose(pt.lambdas_, [1, 1, 1])

    Xft = pt.fit_transform(X)
    Xt = pt.transform(X)

    for Xt_ in [Xft, Xt]:
        if standardize:
            assert_allclose(Xt_, np.zeros_like(X))
        else:
            assert_allclose(Xt_, X)
env-llmeval/lib/python3.10/site-packages/sklearn/preprocessing/tests/test_discretization.py
ADDED
@@ -0,0 +1,503 @@
import warnings

import numpy as np
import pytest
import scipy.sparse as sp

from sklearn import clone
from sklearn.preprocessing import KBinsDiscretizer, OneHotEncoder
from sklearn.utils._testing import (
    assert_allclose,
    assert_allclose_dense_sparse,
    assert_array_almost_equal,
    assert_array_equal,
)

X = [[-2, 1.5, -4, -1], [-1, 2.5, -3, -0.5], [0, 3.5, -2, 0.5], [1, 4.5, -1, 2]]


@pytest.mark.parametrize(
    "strategy, expected, sample_weight",
    [
        ("uniform", [[0, 0, 0, 0], [1, 1, 1, 0], [2, 2, 2, 1], [2, 2, 2, 2]], None),
        ("kmeans", [[0, 0, 0, 0], [0, 0, 0, 0], [1, 1, 1, 1], [2, 2, 2, 2]], None),
        ("quantile", [[0, 0, 0, 0], [1, 1, 1, 1], [2, 2, 2, 2], [2, 2, 2, 2]], None),
        (
            "quantile",
            [[0, 0, 0, 0], [1, 1, 1, 1], [2, 2, 2, 2], [2, 2, 2, 2]],
            [1, 1, 2, 1],
        ),
        (
            "quantile",
            [[0, 0, 0, 0], [1, 1, 1, 1], [2, 2, 2, 2], [2, 2, 2, 2]],
            [1, 1, 1, 1],
        ),
        (
            "quantile",
            [[0, 0, 0, 0], [0, 0, 0, 0], [1, 1, 1, 1], [1, 1, 1, 1]],
            [0, 1, 1, 1],
        ),
        (
            "kmeans",
            [[0, 0, 0, 0], [1, 1, 1, 0], [1, 1, 1, 1], [2, 2, 2, 2]],
            [1, 0, 3, 1],
        ),
        (
            "kmeans",
            [[0, 0, 0, 0], [0, 0, 0, 0], [1, 1, 1, 1], [2, 2, 2, 2]],
            [1, 1, 1, 1],
        ),
    ],
)
# TODO(1.5) remove warning filter when kbd's subsample default is changed
@pytest.mark.filterwarnings("ignore:In version 1.5 onwards, subsample=200_000")
def test_fit_transform(strategy, expected, sample_weight):
    est = KBinsDiscretizer(n_bins=3, encode="ordinal", strategy=strategy)
    est.fit(X, sample_weight=sample_weight)
    assert_array_equal(expected, est.transform(X))


def test_valid_n_bins():
    KBinsDiscretizer(n_bins=2).fit_transform(X)
    KBinsDiscretizer(n_bins=np.array([2])[0]).fit_transform(X)
    assert KBinsDiscretizer(n_bins=2).fit(X).n_bins_.dtype == np.dtype(int)


@pytest.mark.parametrize("strategy", ["uniform"])
def test_kbinsdiscretizer_wrong_strategy_with_weights(strategy):
    """Check that we raise an error when the wrong strategy is used."""
    sample_weight = np.ones(shape=(len(X)))
    est = KBinsDiscretizer(n_bins=3, strategy=strategy)
    err_msg = (
        "`sample_weight` was provided but it cannot be used with strategy='uniform'."
    )
    with pytest.raises(ValueError, match=err_msg):
        est.fit(X, sample_weight=sample_weight)


def test_invalid_n_bins_array():
    # Bad shape
    n_bins = np.full((2, 4), 2.0)
    est = KBinsDiscretizer(n_bins=n_bins)
    err_msg = r"n_bins must be a scalar or array of shape \(n_features,\)."
    with pytest.raises(ValueError, match=err_msg):
        est.fit_transform(X)

    # Incorrect number of features
    n_bins = [1, 2, 2]
    est = KBinsDiscretizer(n_bins=n_bins)
    err_msg = r"n_bins must be a scalar or array of shape \(n_features,\)."
    with pytest.raises(ValueError, match=err_msg):
        est.fit_transform(X)

    # Bad bin values
    n_bins = [1, 2, 2, 1]
    est = KBinsDiscretizer(n_bins=n_bins)
    err_msg = (
        "KBinsDiscretizer received an invalid number of bins "
        "at indices 0, 3. Number of bins must be at least 2, "
        "and must be an int."
    )
    with pytest.raises(ValueError, match=err_msg):
        est.fit_transform(X)

    # Float bin values
    n_bins = [2.1, 2, 2.1, 2]
    est = KBinsDiscretizer(n_bins=n_bins)
    err_msg = (
        "KBinsDiscretizer received an invalid number of bins "
        "at indices 0, 2. Number of bins must be at least 2, "
        "and must be an int."
    )
    with pytest.raises(ValueError, match=err_msg):
        est.fit_transform(X)


@pytest.mark.parametrize(
    "strategy, expected, sample_weight",
    [
        ("uniform", [[0, 0, 0, 0], [0, 1, 1, 0], [1, 2, 2, 1], [1, 2, 2, 2]], None),
        ("kmeans", [[0, 0, 0, 0], [0, 0, 0, 0], [1, 1, 1, 1], [1, 2, 2, 2]], None),
        ("quantile", [[0, 0, 0, 0], [0, 1, 1, 1], [1, 2, 2, 2], [1, 2, 2, 2]], None),
        (
            "quantile",
            [[0, 0, 0, 0], [0, 1, 1, 1], [1, 2, 2, 2], [1, 2, 2, 2]],
            [1, 1, 3, 1],
        ),
        (
            "quantile",
            [[0, 0, 0, 0], [0, 0, 0, 0], [1, 1, 1, 1], [1, 1, 1, 1]],
            [0, 1, 3, 1],
        ),
        # (
        #     "quantile",
        #     [[0, 0, 0, 0], [0, 1, 1, 1], [1, 2, 2, 2], [1, 2, 2, 2]],
        #     [1, 1, 1, 1],
        # ),
        #
        # TODO: The commented-out case above aims to check that an all-ones
        # sample_weight gives the same result as sample_weight=None.
        # Unfortunately, the behavior of `_weighted_percentile` when
        # `sample_weight = [1, 1, 1, 1]` is currently not equivalent.
        # This problem has been addressed in issue:
        # https://github.com/scikit-learn/scikit-learn/issues/17370
        (
            "kmeans",
            [[0, 0, 0, 0], [0, 1, 1, 0], [1, 1, 1, 1], [1, 2, 2, 2]],
            [1, 0, 3, 1],
        ),
    ],
)
# TODO(1.5) remove warning filter when kbd's subsample default is changed
@pytest.mark.filterwarnings("ignore:In version 1.5 onwards, subsample=200_000")
def test_fit_transform_n_bins_array(strategy, expected, sample_weight):
    est = KBinsDiscretizer(
        n_bins=[2, 3, 3, 3], encode="ordinal", strategy=strategy
    ).fit(X, sample_weight=sample_weight)
    assert_array_equal(expected, est.transform(X))

    # test the shape of bin_edges_
    n_features = np.array(X).shape[1]
    assert est.bin_edges_.shape == (n_features,)
    for bin_edges, n_bins in zip(est.bin_edges_, est.n_bins_):
        assert bin_edges.shape == (n_bins + 1,)


@pytest.mark.filterwarnings("ignore: Bins whose width are too small")
def test_kbinsdiscretizer_effect_sample_weight():
    """Check the impact of `sample_weight` on the computed quantiles."""
    X = np.array([[-2], [-1], [1], [3], [500], [1000]])
    # add a large number of bins such that each sample with a non-null weight
    # will be used as a bin edge
    est = KBinsDiscretizer(n_bins=10, encode="ordinal", strategy="quantile")
    est.fit(X, sample_weight=[1, 1, 1, 1, 0, 0])
    assert_allclose(est.bin_edges_[0], [-2, -1, 1, 3])
    assert_allclose(est.transform(X), [[0.0], [1.0], [2.0], [2.0], [2.0], [2.0]])


# TODO(1.5) remove warning filter when kbd's subsample default is changed
@pytest.mark.filterwarnings("ignore:In version 1.5 onwards, subsample=200_000")
@pytest.mark.parametrize("strategy", ["kmeans", "quantile"])
def test_kbinsdiscretizer_no_mutating_sample_weight(strategy):
    """Make sure that `sample_weight` is not changed in place."""
    est = KBinsDiscretizer(n_bins=3, encode="ordinal", strategy=strategy)
    sample_weight = np.array([1, 3, 1, 2], dtype=np.float64)
    sample_weight_copy = np.copy(sample_weight)
    est.fit(X, sample_weight=sample_weight)
    assert_allclose(sample_weight, sample_weight_copy)


@pytest.mark.parametrize("strategy", ["uniform", "kmeans", "quantile"])
def test_same_min_max(strategy):
    warnings.simplefilter("always")
    X = np.array([[1, -2], [1, -1], [1, 0], [1, 1]])
    est = KBinsDiscretizer(strategy=strategy, n_bins=3, encode="ordinal")
    warning_message = "Feature 0 is constant and will be replaced with 0."
    with pytest.warns(UserWarning, match=warning_message):
        est.fit(X)
    assert est.n_bins_[0] == 1
    # replace the feature with zeros
    Xt = est.transform(X)
    assert_array_equal(Xt[:, 0], np.zeros(X.shape[0]))


def test_transform_1d_behavior():
    X = np.arange(4)
    est = KBinsDiscretizer(n_bins=2)
    with pytest.raises(ValueError):
        est.fit(X)

    est = KBinsDiscretizer(n_bins=2)
    est.fit(X.reshape(-1, 1))
    with pytest.raises(ValueError):
        est.transform(X)


@pytest.mark.parametrize("i", range(1, 9))
def test_numeric_stability(i):
    X_init = np.array([2.0, 4.0, 6.0, 8.0, 10.0]).reshape(-1, 1)
    Xt_expected = np.array([0, 0, 1, 1, 1]).reshape(-1, 1)

    # Test up to discretizing nano units
    X = X_init / 10**i
    Xt = KBinsDiscretizer(n_bins=2, encode="ordinal").fit_transform(X)
    assert_array_equal(Xt_expected, Xt)


def test_encode_options():
    est = KBinsDiscretizer(n_bins=[2, 3, 3, 3], encode="ordinal").fit(X)
    Xt_1 = est.transform(X)
    est = KBinsDiscretizer(n_bins=[2, 3, 3, 3], encode="onehot-dense").fit(X)
    Xt_2 = est.transform(X)
    assert not sp.issparse(Xt_2)
    assert_array_equal(
        OneHotEncoder(
            categories=[np.arange(i) for i in [2, 3, 3, 3]], sparse_output=False
        ).fit_transform(Xt_1),
        Xt_2,
    )
    est = KBinsDiscretizer(n_bins=[2, 3, 3, 3], encode="onehot").fit(X)
    Xt_3 = est.transform(X)
    assert sp.issparse(Xt_3)
    assert_array_equal(
        OneHotEncoder(
            categories=[np.arange(i) for i in [2, 3, 3, 3]], sparse_output=True
        )
        .fit_transform(Xt_1)
        .toarray(),
        Xt_3.toarray(),
    )


@pytest.mark.parametrize(
    "strategy, expected_2bins, expected_3bins, expected_5bins",
    [
        ("uniform", [0, 0, 0, 0, 1, 1], [0, 0, 0, 0, 2, 2], [0, 0, 1, 1, 4, 4]),
        ("kmeans", [0, 0, 0, 0, 1, 1], [0, 0, 1, 1, 2, 2], [0, 0, 1, 2, 3, 4]),
        ("quantile", [0, 0, 0, 1, 1, 1], [0, 0, 1, 1, 2, 2], [0, 1, 2, 3, 4, 4]),
    ],
)
# TODO(1.5) remove warning filter when kbd's subsample default is changed
@pytest.mark.filterwarnings("ignore:In version 1.5 onwards, subsample=200_000")
def test_nonuniform_strategies(
    strategy, expected_2bins, expected_3bins, expected_5bins
):
    X = np.array([0, 0.5, 2, 3, 9, 10]).reshape(-1, 1)

    # with 2 bins
    est = KBinsDiscretizer(n_bins=2, strategy=strategy, encode="ordinal")
    Xt = est.fit_transform(X)
    assert_array_equal(expected_2bins, Xt.ravel())

    # with 3 bins
    est = KBinsDiscretizer(n_bins=3, strategy=strategy, encode="ordinal")
    Xt = est.fit_transform(X)
    assert_array_equal(expected_3bins, Xt.ravel())

    # with 5 bins
    est = KBinsDiscretizer(n_bins=5, strategy=strategy, encode="ordinal")
    Xt = est.fit_transform(X)
    assert_array_equal(expected_5bins, Xt.ravel())

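# Illustrative sketch (not part of the original diff): rough bin-edge
# placement for the three strategies on one feature. The k-means variant is
# a simplified stand-in using sklearn.cluster.KMeans, not KBinsDiscretizer's
# exact internal routine.
def _demo_strategy_bin_edges(n_bins=3):
    from sklearn.cluster import KMeans

    x = np.array([0, 0.5, 2, 3, 9, 10])
    # 'uniform': equal-width bins between min and max
    uniform_edges = np.linspace(x.min(), x.max(), n_bins + 1)
    # 'quantile': edges at evenly spaced percentiles
    quantile_edges = np.percentile(x, np.linspace(0, 100, n_bins + 1))
    # 'kmeans': midpoints between sorted 1D cluster centers, plus the extremes
    centers = np.sort(
        KMeans(n_clusters=n_bins, n_init=10, random_state=0)
        .fit(x.reshape(-1, 1))
        .cluster_centers_.ravel()
    )
    kmeans_edges = np.r_[x.min(), (centers[1:] + centers[:-1]) / 2, x.max()]
    return uniform_edges, quantile_edges, kmeans_edges
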
@pytest.mark.parametrize(
    "strategy, expected_inv",
    [
        (
            "uniform",
            [
                [-1.5, 2.0, -3.5, -0.5],
                [-0.5, 3.0, -2.5, -0.5],
                [0.5, 4.0, -1.5, 0.5],
                [0.5, 4.0, -1.5, 1.5],
            ],
        ),
        (
            "kmeans",
            [
                [-1.375, 2.125, -3.375, -0.5625],
                [-1.375, 2.125, -3.375, -0.5625],
                [-0.125, 3.375, -2.125, 0.5625],
                [0.75, 4.25, -1.25, 1.625],
            ],
        ),
        (
            "quantile",
            [
                [-1.5, 2.0, -3.5, -0.75],
                [-0.5, 3.0, -2.5, 0.0],
                [0.5, 4.0, -1.5, 1.25],
                [0.5, 4.0, -1.5, 1.25],
            ],
        ),
    ],
)
# TODO(1.5) remove warning filter when kbd's subsample default is changed
@pytest.mark.filterwarnings("ignore:In version 1.5 onwards, subsample=200_000")
@pytest.mark.parametrize("encode", ["ordinal", "onehot", "onehot-dense"])
def test_inverse_transform(strategy, encode, expected_inv):
    kbd = KBinsDiscretizer(n_bins=3, strategy=strategy, encode=encode)
    Xt = kbd.fit_transform(X)
    Xinv = kbd.inverse_transform(Xt)
    assert_array_almost_equal(expected_inv, Xinv)

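# Illustrative sketch (not part of the original diff): with encode="ordinal",
# inverse_transform maps each bin index back to the center of its bin, which
# is where the expected_inv values above come from.
def _demo_inverse_maps_to_bin_centers():
    x = np.array([[0.0], [1.0], [2.0], [9.0]])
    kbd = KBinsDiscretizer(
        n_bins=2, encode="ordinal", strategy="uniform", subsample=None
    ).fit(x)
    edges = kbd.bin_edges_[0]               # [0.0, 4.5, 9.0]
    centers = (edges[:-1] + edges[1:]) / 2  # [2.25, 6.75]
    idx = kbd.transform(x).astype(int).ravel()
    assert_allclose(kbd.inverse_transform(kbd.transform(x)).ravel(), centers[idx])
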
# TODO(1.5) remove warning filter when kbd's subsample default is changed
@pytest.mark.filterwarnings("ignore:In version 1.5 onwards, subsample=200_000")
@pytest.mark.parametrize("strategy", ["uniform", "kmeans", "quantile"])
def test_transform_outside_fit_range(strategy):
    X = np.array([0, 1, 2, 3])[:, None]
    kbd = KBinsDiscretizer(n_bins=4, strategy=strategy, encode="ordinal")
    kbd.fit(X)

    X2 = np.array([-2, 5])[:, None]
    X2t = kbd.transform(X2)
    assert_array_equal(X2t.max(axis=0) + 1, kbd.n_bins_)
    assert_array_equal(X2t.min(axis=0), [0])


def test_overwrite():
    X = np.array([0, 1, 2, 3])[:, None]
    X_before = X.copy()

    est = KBinsDiscretizer(n_bins=3, encode="ordinal")
    Xt = est.fit_transform(X)
    assert_array_equal(X, X_before)

    Xt_before = Xt.copy()
    Xinv = est.inverse_transform(Xt)
    assert_array_equal(Xt, Xt_before)
    assert_array_equal(Xinv, np.array([[0.5], [1.5], [2.5], [2.5]]))


@pytest.mark.parametrize(
    "strategy, expected_bin_edges", [("quantile", [0, 1, 3]), ("kmeans", [0, 1.5, 3])]
)
def test_redundant_bins(strategy, expected_bin_edges):
    X = [[0], [0], [0], [0], [3], [3]]
    kbd = KBinsDiscretizer(n_bins=3, strategy=strategy, subsample=None)
    warning_message = "Consider decreasing the number of bins."
    with pytest.warns(UserWarning, match=warning_message):
        kbd.fit(X)
    assert_array_almost_equal(kbd.bin_edges_[0], expected_bin_edges)


def test_percentile_numeric_stability():
    X = np.array([0.05, 0.05, 0.95]).reshape(-1, 1)
    bin_edges = np.array([0.05, 0.23, 0.41, 0.59, 0.77, 0.95])
    Xt = np.array([0, 0, 4]).reshape(-1, 1)
    kbd = KBinsDiscretizer(n_bins=10, encode="ordinal", strategy="quantile")
    warning_message = "Consider decreasing the number of bins."
    with pytest.warns(UserWarning, match=warning_message):
        kbd.fit(X)

    assert_array_almost_equal(kbd.bin_edges_[0], bin_edges)
    assert_array_almost_equal(kbd.transform(X), Xt)


@pytest.mark.parametrize("in_dtype", [np.float16, np.float32, np.float64])
@pytest.mark.parametrize("out_dtype", [None, np.float32, np.float64])
@pytest.mark.parametrize("encode", ["ordinal", "onehot", "onehot-dense"])
def test_consistent_dtype(in_dtype, out_dtype, encode):
    X_input = np.array(X, dtype=in_dtype)
    kbd = KBinsDiscretizer(n_bins=3, encode=encode, dtype=out_dtype)
    kbd.fit(X_input)

    # test output dtype
    if out_dtype is not None:
        expected_dtype = out_dtype
    elif out_dtype is None and X_input.dtype == np.float16:
        # unsupported numeric input dtypes are cast to np.float64
        expected_dtype = np.float64
    else:
        expected_dtype = X_input.dtype
    Xt = kbd.transform(X_input)
    assert Xt.dtype == expected_dtype


@pytest.mark.parametrize("input_dtype", [np.float16, np.float32, np.float64])
@pytest.mark.parametrize("encode", ["ordinal", "onehot", "onehot-dense"])
def test_32_equal_64(input_dtype, encode):
    # TODO this check is redundant with common checks and can be removed
    # once #16290 is merged
    X_input = np.array(X, dtype=input_dtype)

    # 32 bit output
    kbd_32 = KBinsDiscretizer(n_bins=3, encode=encode, dtype=np.float32)
    kbd_32.fit(X_input)
    Xt_32 = kbd_32.transform(X_input)

    # 64 bit output
    kbd_64 = KBinsDiscretizer(n_bins=3, encode=encode, dtype=np.float64)
    kbd_64.fit(X_input)
    Xt_64 = kbd_64.transform(X_input)

    assert_allclose_dense_sparse(Xt_32, Xt_64)


def test_kbinsdiscretizer_subsample_default():
    # Since the size of X is small (< 2e5), subsampling will not take place.
    X = np.array([-2, 1.5, -4, -1]).reshape(-1, 1)
    kbd_default = KBinsDiscretizer(n_bins=10, encode="ordinal", strategy="quantile")
    kbd_default.fit(X)

    kbd_without_subsampling = clone(kbd_default)
    kbd_without_subsampling.set_params(subsample=None)
    kbd_without_subsampling.fit(X)

    for bin_kbd_default, bin_kbd_with_subsampling in zip(
        kbd_default.bin_edges_[0], kbd_without_subsampling.bin_edges_[0]
    ):
        np.testing.assert_allclose(bin_kbd_default, bin_kbd_with_subsampling)
    assert kbd_default.bin_edges_.shape == kbd_without_subsampling.bin_edges_.shape


@pytest.mark.parametrize(
    "encode, expected_names",
    [
        (
            "onehot",
            [
                f"feat{col_id}_{float(bin_id)}"
                for col_id in range(3)
                for bin_id in range(4)
            ],
        ),
        (
            "onehot-dense",
            [
                f"feat{col_id}_{float(bin_id)}"
                for col_id in range(3)
                for bin_id in range(4)
            ],
        ),
        ("ordinal", [f"feat{col_id}" for col_id in range(3)]),
    ],
)
def test_kbinsdiscretizer_get_feature_names_out(encode, expected_names):
    """Check get_feature_names_out for different settings.
    Non-regression test for #22731
    """
    X = [[-2, 1, -4], [-1, 2, -3], [0, 3, -2], [1, 4, -1]]

    kbd = KBinsDiscretizer(n_bins=4, encode=encode).fit(X)
    Xt = kbd.transform(X)

    input_features = [f"feat{i}" for i in range(3)]
    output_names = kbd.get_feature_names_out(input_features)
    assert Xt.shape[1] == output_names.shape[0]

    assert_array_equal(output_names, expected_names)


@pytest.mark.parametrize("strategy", ["uniform", "kmeans", "quantile"])
def test_kbinsdiscretizer_subsample(strategy, global_random_seed):
    # Check that the bin edges are almost the same when subsampling is used.
    X = np.random.RandomState(global_random_seed).random_sample((100000, 1)) + 1

    kbd_subsampling = KBinsDiscretizer(
        strategy=strategy, subsample=50000, random_state=global_random_seed
    )
    kbd_subsampling.fit(X)

    kbd_no_subsampling = clone(kbd_subsampling)
    kbd_no_subsampling.set_params(subsample=None)
    kbd_no_subsampling.fit(X)

    # We use a large tolerance because we can't expect the bin edges to be exactly the
    # same when subsampling is used.
    assert_allclose(
        kbd_subsampling.bin_edges_[0], kbd_no_subsampling.bin_edges_[0], rtol=1e-2
    )


# TODO(1.5) remove this test
@pytest.mark.parametrize("strategy", ["uniform", "kmeans"])
def test_kbd_subsample_warning(strategy):
    # Check the future warning for the change of default of subsample
    X = np.random.RandomState(0).random_sample((100, 1))

    kbd = KBinsDiscretizer(strategy=strategy, random_state=0)
    with pytest.warns(FutureWarning, match="subsample=200_000 will be used by default"):
        kbd.fit(X)
env-llmeval/lib/python3.10/site-packages/sklearn/preprocessing/tests/test_function_transformer.py
ADDED
@@ -0,0 +1,591 @@
import warnings

import numpy as np
import pytest

from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import FunctionTransformer, StandardScaler
from sklearn.preprocessing._function_transformer import _get_adapter_from_container
from sklearn.utils._testing import (
    _convert_container,
    assert_allclose_dense_sparse,
    assert_array_equal,
)
from sklearn.utils.fixes import CSC_CONTAINERS, CSR_CONTAINERS


def test_get_adapter_from_container():
    """Check the behavior of `_get_adapter_from_container`."""
    pd = pytest.importorskip("pandas")
    X = pd.DataFrame({"a": [1, 2, 3], "b": [10, 20, 100]})
    adapter = _get_adapter_from_container(X)
    assert adapter.container_lib == "pandas"
    err_msg = "The container does not have a registered adapter in scikit-learn."
    with pytest.raises(ValueError, match=err_msg):
        _get_adapter_from_container(X.to_numpy())


def _make_func(args_store, kwargs_store, func=lambda X, *a, **k: X):
    def _func(X, *args, **kwargs):
        args_store.append(X)
        args_store.extend(args)
        kwargs_store.update(kwargs)
        return func(X)

    return _func


def test_delegate_to_func():
    # (args|kwargs)_store will hold the positional and keyword arguments
    # passed to the function inside the FunctionTransformer.
    args_store = []
    kwargs_store = {}
    X = np.arange(10).reshape((5, 2))
    assert_array_equal(
        FunctionTransformer(_make_func(args_store, kwargs_store)).transform(X),
        X,
        "transform should have returned X unchanged",
    )

    # The function should only have received X.
    assert args_store == [
        X
    ], "Incorrect positional arguments passed to func: {args}".format(args=args_store)

    assert (
        not kwargs_store
    ), "Unexpected keyword arguments passed to func: {args}".format(args=kwargs_store)

    # reset the argument stores.
    args_store[:] = []
    kwargs_store.clear()
    transformed = FunctionTransformer(
        _make_func(args_store, kwargs_store),
    ).transform(X)

    assert_array_equal(
        transformed, X, err_msg="transform should have returned X unchanged"
    )

    # The function should have received X
    assert args_store == [
        X
    ], "Incorrect positional arguments passed to func: {args}".format(args=args_store)

    assert (
        not kwargs_store
    ), "Unexpected keyword arguments passed to func: {args}".format(args=kwargs_store)


def test_np_log():
    X = np.arange(10).reshape((5, 2))

    # Test that the numpy.log example still works.
    assert_array_equal(
        FunctionTransformer(np.log1p).transform(X),
        np.log1p(X),
    )


def test_kw_arg():
    X = np.linspace(0, 1, num=10).reshape((5, 2))

    F = FunctionTransformer(np.around, kw_args=dict(decimals=3))

    # Test that rounding is correct
    assert_array_equal(F.transform(X), np.around(X, decimals=3))


def test_kw_arg_update():
    X = np.linspace(0, 1, num=10).reshape((5, 2))

    F = FunctionTransformer(np.around, kw_args=dict(decimals=3))

    F.kw_args["decimals"] = 1

    # Test that rounding is correct
    assert_array_equal(F.transform(X), np.around(X, decimals=1))


def test_kw_arg_reset():
    X = np.linspace(0, 1, num=10).reshape((5, 2))

    F = FunctionTransformer(np.around, kw_args=dict(decimals=3))

    F.kw_args = dict(decimals=1)

    # Test that rounding is correct
    assert_array_equal(F.transform(X), np.around(X, decimals=1))


def test_inverse_transform():
    X = np.array([1, 4, 9, 16]).reshape((2, 2))

    # Test that inverse_transform works correctly
    F = FunctionTransformer(
        func=np.sqrt,
        inverse_func=np.around,
        inv_kw_args=dict(decimals=3),
    )
    assert_array_equal(
        F.inverse_transform(F.transform(X)),
        np.around(np.sqrt(X), decimals=3),
    )


@pytest.mark.parametrize("sparse_container", [None] + CSC_CONTAINERS + CSR_CONTAINERS)
def test_check_inverse(sparse_container):
    X = np.array([1, 4, 9, 16], dtype=np.float64).reshape((2, 2))
    if sparse_container is not None:
        X = sparse_container(X)

    trans = FunctionTransformer(
        func=np.sqrt,
        inverse_func=np.around,
        accept_sparse=sparse_container is not None,
        check_inverse=True,
        validate=True,
    )
    warning_message = (
        "The provided functions are not strictly"
        " inverse of each other. If you are sure you"
        " want to proceed regardless, set"
        " 'check_inverse=False'."
    )
    with pytest.warns(UserWarning, match=warning_message):
        trans.fit(X)

    trans = FunctionTransformer(
        func=np.expm1,
        inverse_func=np.log1p,
        accept_sparse=sparse_container is not None,
        check_inverse=True,
        validate=True,
    )
    with warnings.catch_warnings():
        warnings.simplefilter("error", UserWarning)
        Xt = trans.fit_transform(X)

    assert_allclose_dense_sparse(X, trans.inverse_transform(Xt))


def test_check_inverse_func_or_inverse_not_provided():
    # check that we don't check the inverse when either func or inverse_func
    # is not provided.
    X = np.array([1, 4, 9, 16], dtype=np.float64).reshape((2, 2))

    trans = FunctionTransformer(
        func=np.expm1, inverse_func=None, check_inverse=True, validate=True
    )
    with warnings.catch_warnings():
        warnings.simplefilter("error", UserWarning)
        trans.fit(X)
    trans = FunctionTransformer(
        func=None, inverse_func=np.expm1, check_inverse=True, validate=True
    )
    with warnings.catch_warnings():
        warnings.simplefilter("error", UserWarning)
        trans.fit(X)

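# Illustrative sketch (not part of the original diff): an exact
# func/inverse_func pair fits silently under check_inverse=True, as the
# np.expm1/np.log1p case above demonstrates.
def _demo_check_inverse_roundtrip():
    ft = FunctionTransformer(
        func=np.log1p, inverse_func=np.expm1, check_inverse=True, validate=True
    )
    X = np.array([[1.0, 4.0], [9.0, 16.0]])
    Xt = ft.fit_transform(X)  # no "not strictly inverse" warning expected
    assert np.allclose(ft.inverse_transform(Xt), X)
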
def test_function_transformer_frame():
    pd = pytest.importorskip("pandas")
    X_df = pd.DataFrame(np.random.randn(100, 10))
    transformer = FunctionTransformer()
    X_df_trans = transformer.fit_transform(X_df)
    assert hasattr(X_df_trans, "loc")


@pytest.mark.parametrize("X_type", ["array", "series"])
def test_function_transformer_raise_error_with_mixed_dtype(X_type):
    """Check that `FunctionTransformer.check_inverse` raises an error on mixed dtype."""
    mapping = {"one": 1, "two": 2, "three": 3, 5: "five", 6: "six"}
    inverse_mapping = {value: key for key, value in mapping.items()}
    dtype = "object"

    data = ["one", "two", "three", "one", "one", 5, 6]
    data = _convert_container(data, X_type, columns_name=["value"], dtype=dtype)

    def func(X):
        return np.array([mapping[X[i]] for i in range(X.size)], dtype=object)

    def inverse_func(X):
        return _convert_container(
            [inverse_mapping[x] for x in X],
            X_type,
            columns_name=["value"],
            dtype=dtype,
        )

    transformer = FunctionTransformer(
        func=func, inverse_func=inverse_func, validate=False, check_inverse=True
    )

    msg = "'check_inverse' is only supported when all the elements in `X` is numerical."
    with pytest.raises(ValueError, match=msg):
        transformer.fit(data)


def test_function_transformer_support_all_numerical_dataframes_check_inverse_True():
    """Check support for dataframes with only numerical values."""
    pd = pytest.importorskip("pandas")

    df = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
    transformer = FunctionTransformer(
        func=lambda x: x + 2, inverse_func=lambda x: x - 2, check_inverse=True
    )

    # Does not raise an error
    df_out = transformer.fit_transform(df)
    assert_allclose_dense_sparse(df_out, df + 2)


def test_function_transformer_with_dataframe_and_check_inverse_True():
    """Check that an error is raised when check_inverse=True.

    Non-regression test for gh-25261.
    """
    pd = pytest.importorskip("pandas")
    transformer = FunctionTransformer(
        func=lambda x: x, inverse_func=lambda x: x, check_inverse=True
    )

    df_mixed = pd.DataFrame({"a": [1, 2, 3], "b": ["a", "b", "c"]})
    msg = "'check_inverse' is only supported when all the elements in `X` is numerical."
    with pytest.raises(ValueError, match=msg):
        transformer.fit(df_mixed)


@pytest.mark.parametrize(
    "X, feature_names_out, input_features, expected",
    [
        (
            # NumPy inputs, default behavior: generate names
            np.random.rand(100, 3),
            "one-to-one",
            None,
            ("x0", "x1", "x2"),
        ),
        (
            # Pandas input, default behavior: use input feature names
            {"a": np.random.rand(100), "b": np.random.rand(100)},
            "one-to-one",
            None,
            ("a", "b"),
        ),
        (
            # NumPy input, feature_names_out=callable
            np.random.rand(100, 3),
            lambda transformer, input_features: ("a", "b"),
            None,
            ("a", "b"),
        ),
        (
            # Pandas input, feature_names_out=callable
            {"a": np.random.rand(100), "b": np.random.rand(100)},
            lambda transformer, input_features: ("c", "d", "e"),
            None,
            ("c", "d", "e"),
        ),
        (
            # NumPy input, feature_names_out=callable -- default input_features
            np.random.rand(100, 3),
            lambda transformer, input_features: tuple(input_features) + ("a",),
            None,
            ("x0", "x1", "x2", "a"),
        ),
        (
            # Pandas input, feature_names_out=callable -- default input_features
            {"a": np.random.rand(100), "b": np.random.rand(100)},
            lambda transformer, input_features: tuple(input_features) + ("c",),
            None,
            ("a", "b", "c"),
        ),
        (
            # NumPy input, input_features=list of names
            np.random.rand(100, 3),
            "one-to-one",
            ("a", "b", "c"),
            ("a", "b", "c"),
        ),
        (
            # Pandas input, input_features=list of names
            {"a": np.random.rand(100), "b": np.random.rand(100)},
            "one-to-one",
            ("a", "b"),  # must match feature_names_in_
            ("a", "b"),
        ),
        (
            # NumPy input, feature_names_out=callable, input_features=list
            np.random.rand(100, 3),
            lambda transformer, input_features: tuple(input_features) + ("d",),
            ("a", "b", "c"),
            ("a", "b", "c", "d"),
        ),
        (
            # Pandas input, feature_names_out=callable, input_features=list
            {"a": np.random.rand(100), "b": np.random.rand(100)},
            lambda transformer, input_features: tuple(input_features) + ("c",),
            ("a", "b"),  # must match feature_names_in_
            ("a", "b", "c"),
        ),
    ],
)
@pytest.mark.parametrize("validate", [True, False])
def test_function_transformer_get_feature_names_out(
    X, feature_names_out, input_features, expected, validate
):
    if isinstance(X, dict):
        pd = pytest.importorskip("pandas")
        X = pd.DataFrame(X)

    transformer = FunctionTransformer(
        feature_names_out=feature_names_out, validate=validate
    )
    transformer.fit(X)
    names = transformer.get_feature_names_out(input_features)
    assert isinstance(names, np.ndarray)
    assert names.dtype == object
    assert_array_equal(names, expected)

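# Illustrative sketch (not part of the original diff): the two
# feature_names_out modes from the parametrized test above, in isolation and
# with hypothetical input names.
def _demo_feature_names_out_modes():
    X = np.random.rand(5, 2)
    # "one-to-one": output names mirror the input names
    ft = FunctionTransformer(feature_names_out="one-to-one").fit(X)
    assert_array_equal(ft.get_feature_names_out(["a", "b"]), ["a", "b"])
    # callable: receives (transformer, input_features) and returns new names
    ft = FunctionTransformer(
        feature_names_out=lambda est, names: [f"{n}_out" for n in names]
    ).fit(X)
    assert_array_equal(ft.get_feature_names_out(["a", "b"]), ["a_out", "b_out"])
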
def test_function_transformer_get_feature_names_out_without_validation():
    transformer = FunctionTransformer(feature_names_out="one-to-one", validate=False)
    X = np.random.rand(100, 2)
    transformer.fit_transform(X)

    names = transformer.get_feature_names_out(("a", "b"))
    assert isinstance(names, np.ndarray)
    assert names.dtype == object
    assert_array_equal(names, ("a", "b"))


def test_function_transformer_feature_names_out_is_None():
    transformer = FunctionTransformer()
    X = np.random.rand(100, 2)
    transformer.fit_transform(X)

    msg = "This 'FunctionTransformer' has no attribute 'get_feature_names_out'"
    with pytest.raises(AttributeError, match=msg):
        transformer.get_feature_names_out()


def test_function_transformer_feature_names_out_uses_estimator():
    def add_n_random_features(X, n):
        return np.concatenate([X, np.random.rand(len(X), n)], axis=1)

    def feature_names_out(transformer, input_features):
        n = transformer.kw_args["n"]
        return list(input_features) + [f"rnd{i}" for i in range(n)]

    transformer = FunctionTransformer(
        func=add_n_random_features,
        feature_names_out=feature_names_out,
        kw_args=dict(n=3),
        validate=True,
    )
    pd = pytest.importorskip("pandas")
    df = pd.DataFrame({"a": np.random.rand(100), "b": np.random.rand(100)})
    transformer.fit_transform(df)
    names = transformer.get_feature_names_out()

    assert isinstance(names, np.ndarray)
    assert names.dtype == object
    assert_array_equal(names, ("a", "b", "rnd0", "rnd1", "rnd2"))


def test_function_transformer_validate_inverse():
    """Test that function transformer does not reset estimator in
    `inverse_transform`."""

    def add_constant_feature(X):
        X_one = np.ones((X.shape[0], 1))
        return np.concatenate((X, X_one), axis=1)

    def inverse_add_constant(X):
        return X[:, :-1]

    X = np.array([[1, 2], [3, 4], [3, 4]])
    trans = FunctionTransformer(
        func=add_constant_feature,
        inverse_func=inverse_add_constant,
        validate=True,
    )
    X_trans = trans.fit_transform(X)
    assert trans.n_features_in_ == X.shape[1]

    trans.inverse_transform(X_trans)
    assert trans.n_features_in_ == X.shape[1]


@pytest.mark.parametrize(
    "feature_names_out, expected",
    [
        ("one-to-one", ["pet", "color"]),
        [lambda est, names: [f"{n}_out" for n in names], ["pet_out", "color_out"]],
    ],
)
@pytest.mark.parametrize("in_pipeline", [True, False])
def test_get_feature_names_out_dataframe_with_string_data(
    feature_names_out, expected, in_pipeline
):
    """Check that get_feature_names_out works with DataFrames with string data."""
    pd = pytest.importorskip("pandas")
    X = pd.DataFrame({"pet": ["dog", "cat"], "color": ["red", "green"]})

    def func(X):
        if feature_names_out == "one-to-one":
            return X
        else:
            name = feature_names_out(None, X.columns)
            return X.rename(columns=dict(zip(X.columns, name)))

    transformer = FunctionTransformer(func=func, feature_names_out=feature_names_out)
    if in_pipeline:
        transformer = make_pipeline(transformer)

    X_trans = transformer.fit_transform(X)
    assert isinstance(X_trans, pd.DataFrame)

    names = transformer.get_feature_names_out()
    assert isinstance(names, np.ndarray)
    assert names.dtype == object
    assert_array_equal(names, expected)


def test_set_output_func():
    """Check behavior of set_output with different settings."""
    pd = pytest.importorskip("pandas")

    X = pd.DataFrame({"a": [1, 2, 3], "b": [10, 20, 100]})

    ft = FunctionTransformer(np.log, feature_names_out="one-to-one")

    # no warning is raised when feature_names_out is defined
    with warnings.catch_warnings():
        warnings.simplefilter("error", UserWarning)
        ft.set_output(transform="pandas")

    X_trans = ft.fit_transform(X)
    assert isinstance(X_trans, pd.DataFrame)
    assert_array_equal(X_trans.columns, ["a", "b"])

    ft = FunctionTransformer(lambda x: 2 * x)
    ft.set_output(transform="pandas")
|
475 |
+
|
476 |
+
# no warning is raised when func returns a panda dataframe
|
477 |
+
with warnings.catch_warnings():
|
478 |
+
warnings.simplefilter("error", UserWarning)
|
479 |
+
X_trans = ft.fit_transform(X)
|
480 |
+
assert isinstance(X_trans, pd.DataFrame)
|
481 |
+
assert_array_equal(X_trans.columns, ["a", "b"])
|
482 |
+
|
483 |
+
# Warning is raised when func returns a ndarray
|
484 |
+
ft_np = FunctionTransformer(lambda x: np.asarray(x))
|
485 |
+
|
486 |
+
for transform in ("pandas", "polars"):
|
487 |
+
ft_np.set_output(transform=transform)
|
488 |
+
msg = (
|
489 |
+
f"When `set_output` is configured to be '{transform}'.*{transform} "
|
490 |
+
"DataFrame.*"
|
491 |
+
)
|
492 |
+
with pytest.warns(UserWarning, match=msg):
|
493 |
+
ft_np.fit_transform(X)
|
494 |
+
|
495 |
+
# default transform does not warn
|
496 |
+
ft_np.set_output(transform="default")
|
497 |
+
with warnings.catch_warnings():
|
498 |
+
warnings.simplefilter("error", UserWarning)
|
499 |
+
ft_np.fit_transform(X)
|
500 |
+
|
501 |
+
|
502 |
+
def test_consistence_column_name_between_steps():
|
503 |
+
"""Check that we have a consistence between the feature names out of
|
504 |
+
`FunctionTransformer` and the feature names in of the next step in the pipeline.
|
505 |
+
|
506 |
+
Non-regression test for:
|
507 |
+
https://github.com/scikit-learn/scikit-learn/issues/27695
|
508 |
+
"""
|
509 |
+
pd = pytest.importorskip("pandas")
|
510 |
+
|
511 |
+
def with_suffix(_, names):
|
512 |
+
return [name + "__log" for name in names]
|
513 |
+
|
514 |
+
pipeline = make_pipeline(
|
515 |
+
FunctionTransformer(np.log1p, feature_names_out=with_suffix), StandardScaler()
|
516 |
+
)
|
517 |
+
|
518 |
+
df = pd.DataFrame([[1, 2], [3, 4], [5, 6]], columns=["a", "b"])
|
519 |
+
X_trans = pipeline.fit_transform(df)
|
520 |
+
assert pipeline.get_feature_names_out().tolist() == ["a__log", "b__log"]
|
521 |
+
# StandardScaler will convert to a numpy array
|
522 |
+
assert isinstance(X_trans, np.ndarray)
|
523 |
+
|
524 |
+
|
525 |
+
@pytest.mark.parametrize("dataframe_lib", ["pandas", "polars"])
|
526 |
+
@pytest.mark.parametrize("transform_output", ["default", "pandas", "polars"])
|
527 |
+
def test_function_transformer_overwrite_column_names(dataframe_lib, transform_output):
|
528 |
+
"""Check that we overwrite the column names when we should."""
|
529 |
+
lib = pytest.importorskip(dataframe_lib)
|
530 |
+
if transform_output != "numpy":
|
531 |
+
pytest.importorskip(transform_output)
|
532 |
+
|
533 |
+
df = lib.DataFrame({"a": [1, 2, 3], "b": [10, 20, 100]})
|
534 |
+
|
535 |
+
def with_suffix(_, names):
|
536 |
+
return [name + "__log" for name in names]
|
537 |
+
|
538 |
+
transformer = FunctionTransformer(feature_names_out=with_suffix).set_output(
|
539 |
+
transform=transform_output
|
540 |
+
)
|
541 |
+
X_trans = transformer.fit_transform(df)
|
542 |
+
assert_array_equal(np.asarray(X_trans), np.asarray(df))
|
543 |
+
|
544 |
+
feature_names = transformer.get_feature_names_out()
|
545 |
+
assert list(X_trans.columns) == with_suffix(None, df.columns)
|
546 |
+
assert feature_names.tolist() == with_suffix(None, df.columns)
|
547 |
+
|
548 |
+
|
549 |
+
@pytest.mark.parametrize(
|
550 |
+
"feature_names_out",
|
551 |
+
["one-to-one", lambda _, names: [f"{name}_log" for name in names]],
|
552 |
+
)
|
553 |
+
def test_function_transformer_overwrite_column_names_numerical(feature_names_out):
|
554 |
+
"""Check the same as `test_function_transformer_overwrite_column_names`
|
555 |
+
but for the specific case of pandas where column names can be numerical."""
|
556 |
+
pd = pytest.importorskip("pandas")
|
557 |
+
|
558 |
+
df = pd.DataFrame({0: [1, 2, 3], 1: [10, 20, 100]})
|
559 |
+
|
560 |
+
transformer = FunctionTransformer(feature_names_out=feature_names_out)
|
561 |
+
X_trans = transformer.fit_transform(df)
|
562 |
+
assert_array_equal(np.asarray(X_trans), np.asarray(df))
|
563 |
+
|
564 |
+
feature_names = transformer.get_feature_names_out()
|
565 |
+
assert list(X_trans.columns) == list(feature_names)
|
566 |
+
|
567 |
+
|
568 |
+
@pytest.mark.parametrize("dataframe_lib", ["pandas", "polars"])
|
569 |
+
@pytest.mark.parametrize(
|
570 |
+
"feature_names_out",
|
571 |
+
["one-to-one", lambda _, names: [f"{name}_log" for name in names]],
|
572 |
+
)
|
573 |
+
def test_function_transformer_error_column_inconsistent(
|
574 |
+
dataframe_lib, feature_names_out
|
575 |
+
):
|
576 |
+
"""Check that we raise an error when `func` returns a dataframe with new
|
577 |
+
column names that become inconsistent with `get_feature_names_out`."""
|
578 |
+
lib = pytest.importorskip(dataframe_lib)
|
579 |
+
|
580 |
+
df = lib.DataFrame({"a": [1, 2, 3], "b": [10, 20, 100]})
|
581 |
+
|
582 |
+
def func(df):
|
583 |
+
if dataframe_lib == "pandas":
|
584 |
+
return df.rename(columns={"a": "c"})
|
585 |
+
else:
|
586 |
+
return df.rename({"a": "c"})
|
587 |
+
|
588 |
+
transformer = FunctionTransformer(func=func, feature_names_out=feature_names_out)
|
589 |
+
err_msg = "The output generated by `func` have different column names"
|
590 |
+
with pytest.raises(ValueError, match=err_msg):
|
591 |
+
transformer.fit_transform(df).columns
|
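
A minimal standalone sketch of the `feature_names_out` contract exercised by the tests above; the data and names are made up for illustration and this snippet is not part of the diffed file:

import numpy as np
from sklearn.preprocessing import FunctionTransformer

# The callable receives (transformer, input_features) and returns output names;
# here it appends one name for the extra column that `func` adds.
ft = FunctionTransformer(
    func=lambda X: np.hstack([X, X.sum(axis=1, keepdims=True)]),
    feature_names_out=lambda est, names: list(names) + ["sum"],
)
ft.fit(np.random.rand(5, 2))
print(ft.get_feature_names_out())  # ['x0' 'x1' 'sum']
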
env-llmeval/lib/python3.10/site-packages/sklearn/preprocessing/tests/test_polynomial.py
ADDED
@@ -0,0 +1,1258 @@
import sys

import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_array_equal
from scipy import sparse
from scipy.interpolate import BSpline
from scipy.sparse import random as sparse_random

from sklearn.linear_model import LinearRegression
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import (
    KBinsDiscretizer,
    PolynomialFeatures,
    SplineTransformer,
)
from sklearn.preprocessing._csr_polynomial_expansion import (
    _calc_expanded_nnz,
    _calc_total_nnz,
    _get_sizeof_LARGEST_INT_t,
)
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.utils.fixes import (
    CSC_CONTAINERS,
    CSR_CONTAINERS,
    parse_version,
    sp_version,
)


@pytest.mark.parametrize("est", (PolynomialFeatures, SplineTransformer))
def test_polynomial_and_spline_array_order(est):
    """Test that output array has the given order."""
    X = np.arange(10).reshape(5, 2)

    def is_c_contiguous(a):
        return np.isfortran(a.T)

    assert is_c_contiguous(est().fit_transform(X))
    assert is_c_contiguous(est(order="C").fit_transform(X))
    assert np.isfortran(est(order="F").fit_transform(X))


@pytest.mark.parametrize(
    "params, err_msg",
    [
        ({"knots": [[1]]}, r"Number of knots, knots.shape\[0\], must be >= 2."),
        ({"knots": [[1, 1], [2, 2]]}, r"knots.shape\[1\] == n_features is violated"),
        ({"knots": [[1], [0]]}, "knots must be sorted without duplicates."),
    ],
)
def test_spline_transformer_input_validation(params, err_msg):
    """Test that we raise errors for invalid input in SplineTransformer."""
    X = [[1], [2]]

    with pytest.raises(ValueError, match=err_msg):
        SplineTransformer(**params).fit(X)


@pytest.mark.parametrize("extrapolation", ["continue", "periodic"])
def test_spline_transformer_integer_knots(extrapolation):
    """Test that SplineTransformer accepts integer value knot positions."""
    X = np.arange(20).reshape(10, 2)
    knots = [[0, 1], [1, 2], [5, 5], [11, 10], [12, 11]]
    _ = SplineTransformer(
        degree=3, knots=knots, extrapolation=extrapolation
    ).fit_transform(X)


def test_spline_transformer_feature_names():
    """Test that SplineTransformer generates correct feature names."""
    X = np.arange(20).reshape(10, 2)
    splt = SplineTransformer(n_knots=3, degree=3, include_bias=True).fit(X)
    feature_names = splt.get_feature_names_out()
    assert_array_equal(
        feature_names,
        [
            "x0_sp_0",
            "x0_sp_1",
            "x0_sp_2",
            "x0_sp_3",
            "x0_sp_4",
            "x1_sp_0",
            "x1_sp_1",
            "x1_sp_2",
            "x1_sp_3",
            "x1_sp_4",
        ],
    )

    splt = SplineTransformer(n_knots=3, degree=3, include_bias=False).fit(X)
    feature_names = splt.get_feature_names_out(["a", "b"])
    assert_array_equal(
        feature_names,
        [
            "a_sp_0",
            "a_sp_1",
            "a_sp_2",
            "a_sp_3",
            "b_sp_0",
            "b_sp_1",
            "b_sp_2",
            "b_sp_3",
        ],
    )


@pytest.mark.parametrize(
    "extrapolation",
    ["constant", "linear", "continue", "periodic"],
)
@pytest.mark.parametrize("degree", [2, 3])
def test_split_transform_feature_names_extrapolation_degree(extrapolation, degree):
    """Test feature names are correct for different extrapolations and degree.

    Non-regression test for gh-25292.
    """
    X = np.arange(20).reshape(10, 2)
    splt = SplineTransformer(degree=degree, extrapolation=extrapolation).fit(X)
    feature_names = splt.get_feature_names_out(["a", "b"])
    assert len(feature_names) == splt.n_features_out_

    X_trans = splt.transform(X)
    assert X_trans.shape[1] == len(feature_names)


@pytest.mark.parametrize("degree", range(1, 5))
@pytest.mark.parametrize("n_knots", range(3, 5))
@pytest.mark.parametrize("knots", ["uniform", "quantile"])
@pytest.mark.parametrize("extrapolation", ["constant", "periodic"])
def test_spline_transformer_unity_decomposition(degree, n_knots, knots, extrapolation):
    """Test that B-splines are indeed a decomposition of unity.

    Splines basis functions must sum up to 1 per row, if we stay in between boundaries.
    """
    X = np.linspace(0, 1, 100)[:, None]
    # make the boundaries 0 and 1 part of X_train, for sure.
    X_train = np.r_[[[0]], X[::2, :], [[1]]]
    X_test = X[1::2, :]

    if extrapolation == "periodic":
        n_knots = n_knots + degree  # periodic splines require degree < n_knots

    splt = SplineTransformer(
        n_knots=n_knots,
        degree=degree,
        knots=knots,
        include_bias=True,
        extrapolation=extrapolation,
    )
    splt.fit(X_train)
    for X in [X_train, X_test]:
        assert_allclose(np.sum(splt.transform(X), axis=1), 1)
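
The unity-decomposition property checked above can be seen directly on a toy grid; a short illustrative sketch, not part of the diffed file:

import numpy as np
from sklearn.preprocessing import SplineTransformer

X = np.linspace(0, 1, 11)[:, None]
# With include_bias=True the full B-spline basis is kept, and inside the
# fitted knot range every row of the basis matrix sums to 1.
basis = SplineTransformer(n_knots=4, degree=3, include_bias=True).fit_transform(X)
print(np.allclose(basis.sum(axis=1), 1.0))  # True
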
@pytest.mark.parametrize(["bias", "intercept"], [(True, False), (False, True)])
|
157 |
+
def test_spline_transformer_linear_regression(bias, intercept):
|
158 |
+
"""Test that B-splines fit a sinusodial curve pretty well."""
|
159 |
+
X = np.linspace(0, 10, 100)[:, None]
|
160 |
+
y = np.sin(X[:, 0]) + 2 # +2 to avoid the value 0 in assert_allclose
|
161 |
+
pipe = Pipeline(
|
162 |
+
steps=[
|
163 |
+
(
|
164 |
+
"spline",
|
165 |
+
SplineTransformer(
|
166 |
+
n_knots=15,
|
167 |
+
degree=3,
|
168 |
+
include_bias=bias,
|
169 |
+
extrapolation="constant",
|
170 |
+
),
|
171 |
+
),
|
172 |
+
("ols", LinearRegression(fit_intercept=intercept)),
|
173 |
+
]
|
174 |
+
)
|
175 |
+
pipe.fit(X, y)
|
176 |
+
assert_allclose(pipe.predict(X), y, rtol=1e-3)
|
177 |
+
|
178 |
+
|
179 |
+
@pytest.mark.parametrize(
|
180 |
+
["knots", "n_knots", "sample_weight", "expected_knots"],
|
181 |
+
[
|
182 |
+
("uniform", 3, None, np.array([[0, 2], [3, 8], [6, 14]])),
|
183 |
+
(
|
184 |
+
"uniform",
|
185 |
+
3,
|
186 |
+
np.array([0, 0, 1, 1, 0, 3, 1]),
|
187 |
+
np.array([[2, 2], [4, 8], [6, 14]]),
|
188 |
+
),
|
189 |
+
("uniform", 4, None, np.array([[0, 2], [2, 6], [4, 10], [6, 14]])),
|
190 |
+
("quantile", 3, None, np.array([[0, 2], [3, 3], [6, 14]])),
|
191 |
+
(
|
192 |
+
"quantile",
|
193 |
+
3,
|
194 |
+
np.array([0, 0, 1, 1, 0, 3, 1]),
|
195 |
+
np.array([[2, 2], [5, 8], [6, 14]]),
|
196 |
+
),
|
197 |
+
],
|
198 |
+
)
|
199 |
+
def test_spline_transformer_get_base_knot_positions(
|
200 |
+
knots, n_knots, sample_weight, expected_knots
|
201 |
+
):
|
202 |
+
"""Check the behaviour to find knot positions with and without sample_weight."""
|
203 |
+
X = np.array([[0, 2], [0, 2], [2, 2], [3, 3], [4, 6], [5, 8], [6, 14]])
|
204 |
+
base_knots = SplineTransformer._get_base_knot_positions(
|
205 |
+
X=X, knots=knots, n_knots=n_knots, sample_weight=sample_weight
|
206 |
+
)
|
207 |
+
assert_allclose(base_knots, expected_knots)
|
208 |
+
|
209 |
+
|
210 |
+
@pytest.mark.parametrize(["bias", "intercept"], [(True, False), (False, True)])
|
211 |
+
def test_spline_transformer_periodic_linear_regression(bias, intercept):
|
212 |
+
"""Test that B-splines fit a periodic curve pretty well."""
|
213 |
+
|
214 |
+
# "+ 3" to avoid the value 0 in assert_allclose
|
215 |
+
def f(x):
|
216 |
+
return np.sin(2 * np.pi * x) - np.sin(8 * np.pi * x) + 3
|
217 |
+
|
218 |
+
X = np.linspace(0, 1, 101)[:, None]
|
219 |
+
pipe = Pipeline(
|
220 |
+
steps=[
|
221 |
+
(
|
222 |
+
"spline",
|
223 |
+
SplineTransformer(
|
224 |
+
n_knots=20,
|
225 |
+
degree=3,
|
226 |
+
include_bias=bias,
|
227 |
+
extrapolation="periodic",
|
228 |
+
),
|
229 |
+
),
|
230 |
+
("ols", LinearRegression(fit_intercept=intercept)),
|
231 |
+
]
|
232 |
+
)
|
233 |
+
pipe.fit(X, f(X[:, 0]))
|
234 |
+
|
235 |
+
# Generate larger array to check periodic extrapolation
|
236 |
+
X_ = np.linspace(-1, 2, 301)[:, None]
|
237 |
+
predictions = pipe.predict(X_)
|
238 |
+
assert_allclose(predictions, f(X_[:, 0]), atol=0.01, rtol=0.01)
|
239 |
+
assert_allclose(predictions[0:100], predictions[100:200], rtol=1e-3)
|
240 |
+
|
241 |
+
|
242 |
+
def test_spline_transformer_periodic_spline_backport():
|
243 |
+
"""Test that the backport of extrapolate="periodic" works correctly"""
|
244 |
+
X = np.linspace(-2, 3.5, 10)[:, None]
|
245 |
+
degree = 2
|
246 |
+
|
247 |
+
# Use periodic extrapolation backport in SplineTransformer
|
248 |
+
transformer = SplineTransformer(
|
249 |
+
degree=degree, extrapolation="periodic", knots=[[-1.0], [0.0], [1.0]]
|
250 |
+
)
|
251 |
+
Xt = transformer.fit_transform(X)
|
252 |
+
|
253 |
+
# Use periodic extrapolation in BSpline
|
254 |
+
coef = np.array([[1.0, 0.0], [0.0, 1.0], [1.0, 0.0], [0.0, 1.0]])
|
255 |
+
spl = BSpline(np.arange(-3, 4), coef, degree, "periodic")
|
256 |
+
Xspl = spl(X[:, 0])
|
257 |
+
assert_allclose(Xt, Xspl)
|
258 |
+
|
259 |
+
|
260 |
+
def test_spline_transformer_periodic_splines_periodicity():
|
261 |
+
"""Test if shifted knots result in the same transformation up to permutation."""
|
262 |
+
X = np.linspace(0, 10, 101)[:, None]
|
263 |
+
|
264 |
+
transformer_1 = SplineTransformer(
|
265 |
+
degree=3,
|
266 |
+
extrapolation="periodic",
|
267 |
+
knots=[[0.0], [1.0], [3.0], [4.0], [5.0], [8.0]],
|
268 |
+
)
|
269 |
+
|
270 |
+
transformer_2 = SplineTransformer(
|
271 |
+
degree=3,
|
272 |
+
extrapolation="periodic",
|
273 |
+
knots=[[1.0], [3.0], [4.0], [5.0], [8.0], [9.0]],
|
274 |
+
)
|
275 |
+
|
276 |
+
Xt_1 = transformer_1.fit_transform(X)
|
277 |
+
Xt_2 = transformer_2.fit_transform(X)
|
278 |
+
|
279 |
+
assert_allclose(Xt_1, Xt_2[:, [4, 0, 1, 2, 3]])
|
280 |
+
|
281 |
+
|
282 |
+
@pytest.mark.parametrize("degree", [3, 5])
|
283 |
+
def test_spline_transformer_periodic_splines_smoothness(degree):
|
284 |
+
"""Test that spline transformation is smooth at first / last knot."""
|
285 |
+
X = np.linspace(-2, 10, 10_000)[:, None]
|
286 |
+
|
287 |
+
transformer = SplineTransformer(
|
288 |
+
degree=degree,
|
289 |
+
extrapolation="periodic",
|
290 |
+
knots=[[0.0], [1.0], [3.0], [4.0], [5.0], [8.0]],
|
291 |
+
)
|
292 |
+
Xt = transformer.fit_transform(X)
|
293 |
+
|
294 |
+
delta = (X.max() - X.min()) / len(X)
|
295 |
+
tol = 10 * delta
|
296 |
+
|
297 |
+
dXt = Xt
|
298 |
+
# We expect splines of degree `degree` to be (`degree`-1) times
|
299 |
+
# continuously differentiable. I.e. for d = 0, ..., `degree` - 1 the d-th
|
300 |
+
# derivative should be continuous. This is the case if the (d+1)-th
|
301 |
+
# numerical derivative is reasonably small (smaller than `tol` in absolute
|
302 |
+
# value). We thus compute d-th numeric derivatives for d = 1, ..., `degree`
|
303 |
+
# and compare them to `tol`.
|
304 |
+
#
|
305 |
+
# Note that the 0-th derivative is the function itself, such that we are
|
306 |
+
# also checking its continuity.
|
307 |
+
for d in range(1, degree + 1):
|
308 |
+
# Check continuity of the (d-1)-th derivative
|
309 |
+
diff = np.diff(dXt, axis=0)
|
310 |
+
assert np.abs(diff).max() < tol
|
311 |
+
# Compute d-th numeric derivative
|
312 |
+
dXt = diff / delta
|
313 |
+
|
314 |
+
# As degree `degree` splines are not `degree` times continuously
|
315 |
+
# differentiable at the knots, the `degree + 1`-th numeric derivative
|
316 |
+
# should have spikes at the knots.
|
317 |
+
diff = np.diff(dXt, axis=0)
|
318 |
+
assert np.abs(diff).max() > 1
|
319 |
+
|
320 |
+
|
321 |
+
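
The smoothness test above relies on repeated forward differences as numeric derivatives; a standalone sketch of that estimator on a function with known derivatives, illustrative only and not part of the diffed file:

import numpy as np

def numeric_derivative(values, delta, order):
    # Apply the forward difference quotient `order` times on a uniform grid.
    for _ in range(order):
        values = np.diff(values, axis=0) / delta
    return values

x = np.linspace(0, 2 * np.pi, 1_000)
d2 = numeric_derivative(np.sin(x), x[1] - x[0], order=2)
# The estimate approximates sin'' = -sin at the interior grid points.
print(np.allclose(d2, -np.sin(x)[1:-1], atol=1e-4))  # True
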
@pytest.mark.parametrize(["bias", "intercept"], [(True, False), (False, True)])
|
322 |
+
@pytest.mark.parametrize("degree", [1, 2, 3, 4, 5])
|
323 |
+
def test_spline_transformer_extrapolation(bias, intercept, degree):
|
324 |
+
"""Test that B-spline extrapolation works correctly."""
|
325 |
+
# we use a straight line for that
|
326 |
+
X = np.linspace(-1, 1, 100)[:, None]
|
327 |
+
y = X.squeeze()
|
328 |
+
|
329 |
+
# 'constant'
|
330 |
+
pipe = Pipeline(
|
331 |
+
[
|
332 |
+
[
|
333 |
+
"spline",
|
334 |
+
SplineTransformer(
|
335 |
+
n_knots=4,
|
336 |
+
degree=degree,
|
337 |
+
include_bias=bias,
|
338 |
+
extrapolation="constant",
|
339 |
+
),
|
340 |
+
],
|
341 |
+
["ols", LinearRegression(fit_intercept=intercept)],
|
342 |
+
]
|
343 |
+
)
|
344 |
+
pipe.fit(X, y)
|
345 |
+
assert_allclose(pipe.predict([[-10], [5]]), [-1, 1])
|
346 |
+
|
347 |
+
# 'linear'
|
348 |
+
pipe = Pipeline(
|
349 |
+
[
|
350 |
+
[
|
351 |
+
"spline",
|
352 |
+
SplineTransformer(
|
353 |
+
n_knots=4,
|
354 |
+
degree=degree,
|
355 |
+
include_bias=bias,
|
356 |
+
extrapolation="linear",
|
357 |
+
),
|
358 |
+
],
|
359 |
+
["ols", LinearRegression(fit_intercept=intercept)],
|
360 |
+
]
|
361 |
+
)
|
362 |
+
pipe.fit(X, y)
|
363 |
+
assert_allclose(pipe.predict([[-10], [5]]), [-10, 5])
|
364 |
+
|
365 |
+
# 'error'
|
366 |
+
splt = SplineTransformer(
|
367 |
+
n_knots=4, degree=degree, include_bias=bias, extrapolation="error"
|
368 |
+
)
|
369 |
+
splt.fit(X)
|
370 |
+
msg = "X contains values beyond the limits of the knots"
|
371 |
+
with pytest.raises(ValueError, match=msg):
|
372 |
+
splt.transform([[-10]])
|
373 |
+
with pytest.raises(ValueError, match=msg):
|
374 |
+
splt.transform([[5]])
|
375 |
+
|
376 |
+
|
377 |
+
def test_spline_transformer_kbindiscretizer():
|
378 |
+
"""Test that a B-spline of degree=0 is equivalent to KBinsDiscretizer."""
|
379 |
+
rng = np.random.RandomState(97531)
|
380 |
+
X = rng.randn(200).reshape(200, 1)
|
381 |
+
n_bins = 5
|
382 |
+
n_knots = n_bins + 1
|
383 |
+
|
384 |
+
splt = SplineTransformer(
|
385 |
+
n_knots=n_knots, degree=0, knots="quantile", include_bias=True
|
386 |
+
)
|
387 |
+
splines = splt.fit_transform(X)
|
388 |
+
|
389 |
+
kbd = KBinsDiscretizer(n_bins=n_bins, encode="onehot-dense", strategy="quantile")
|
390 |
+
kbins = kbd.fit_transform(X)
|
391 |
+
|
392 |
+
# Though they should be exactly equal, we test approximately with high
|
393 |
+
# accuracy.
|
394 |
+
assert_allclose(splines, kbins, rtol=1e-13)
|
395 |
+
|
396 |
+
|
397 |
+
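
A degree-0 B-spline basis is piecewise constant, one indicator per knot interval, which is why it coincides with one-hot bin encoding; a small sketch mirroring the test configuration, illustrative only and not part of the diffed file:

import numpy as np
from sklearn.preprocessing import KBinsDiscretizer, SplineTransformer

X = np.random.RandomState(0).randn(50, 1)
# n_knots = n_bins + 1 quantile knots give the same cell boundaries as
# n_bins quantile bins, so the two one-hot style outputs match.
splines = SplineTransformer(n_knots=6, degree=0, knots="quantile").fit_transform(X)
bins = KBinsDiscretizer(
    n_bins=5, encode="onehot-dense", strategy="quantile"
).fit_transform(X)
print(np.allclose(splines, bins))  # True
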
@pytest.mark.skipif(
    sp_version < parse_version("1.8.0"),
    reason="The option `sparse_output` is available as of scipy 1.8.0",
)
@pytest.mark.parametrize("degree", range(1, 3))
@pytest.mark.parametrize("knots", ["uniform", "quantile"])
@pytest.mark.parametrize(
    "extrapolation", ["error", "constant", "linear", "continue", "periodic"]
)
@pytest.mark.parametrize("include_bias", [False, True])
def test_spline_transformer_sparse_output(
    degree, knots, extrapolation, include_bias, global_random_seed
):
    rng = np.random.RandomState(global_random_seed)
    X = rng.randn(200).reshape(40, 5)

    splt_dense = SplineTransformer(
        degree=degree,
        knots=knots,
        extrapolation=extrapolation,
        include_bias=include_bias,
        sparse_output=False,
    )
    splt_sparse = SplineTransformer(
        degree=degree,
        knots=knots,
        extrapolation=extrapolation,
        include_bias=include_bias,
        sparse_output=True,
    )

    splt_dense.fit(X)
    splt_sparse.fit(X)

    X_trans_sparse = splt_sparse.transform(X)
    X_trans_dense = splt_dense.transform(X)
    assert sparse.issparse(X_trans_sparse) and X_trans_sparse.format == "csr"
    assert_allclose(X_trans_dense, X_trans_sparse.toarray())

    # extrapolation regime
    X_min = np.amin(X, axis=0)
    X_max = np.amax(X, axis=0)
    X_extra = np.r_[
        np.linspace(X_min - 5, X_min, 10), np.linspace(X_max, X_max + 5, 10)
    ]
    if extrapolation == "error":
        msg = "X contains values beyond the limits of the knots"
        with pytest.raises(ValueError, match=msg):
            splt_dense.transform(X_extra)
        msg = "Out of bounds"
        with pytest.raises(ValueError, match=msg):
            splt_sparse.transform(X_extra)
    else:
        assert_allclose(
            splt_dense.transform(X_extra), splt_sparse.transform(X_extra).toarray()
        )


@pytest.mark.skipif(
    sp_version >= parse_version("1.8.0"),
    reason="The option `sparse_output` is available as of scipy 1.8.0",
)
def test_spline_transformer_sparse_output_raise_error_for_old_scipy():
    """Test that SplineTransformer with sparse=True raises for scipy<1.8.0."""
    X = [[1], [2]]
    with pytest.raises(ValueError, match="scipy>=1.8.0"):
        SplineTransformer(sparse_output=True).fit(X)


@pytest.mark.parametrize("n_knots", [5, 10])
@pytest.mark.parametrize("include_bias", [True, False])
@pytest.mark.parametrize("degree", [3, 4])
@pytest.mark.parametrize(
    "extrapolation", ["error", "constant", "linear", "continue", "periodic"]
)
@pytest.mark.parametrize("sparse_output", [False, True])
def test_spline_transformer_n_features_out(
    n_knots, include_bias, degree, extrapolation, sparse_output
):
    """Test that transform results in n_features_out_ features."""
    if sparse_output and sp_version < parse_version("1.8.0"):
        pytest.skip("The option `sparse_output` is available as of scipy 1.8.0")

    splt = SplineTransformer(
        n_knots=n_knots,
        degree=degree,
        include_bias=include_bias,
        extrapolation=extrapolation,
        sparse_output=sparse_output,
    )
    X = np.linspace(0, 1, 10)[:, None]
    splt.fit(X)

    assert splt.transform(X).shape[1] == splt.n_features_out_


@pytest.mark.parametrize(
    "params, err_msg",
    [
        ({"degree": (-1, 2)}, r"degree=\(min_degree, max_degree\) must"),
        ({"degree": (0, 1.5)}, r"degree=\(min_degree, max_degree\) must"),
        ({"degree": (3, 2)}, r"degree=\(min_degree, max_degree\) must"),
        ({"degree": (1, 2, 3)}, r"int or tuple \(min_degree, max_degree\)"),
    ],
)
def test_polynomial_features_input_validation(params, err_msg):
    """Test that we raise errors for invalid input in PolynomialFeatures."""
    X = [[1], [2]]

    with pytest.raises(ValueError, match=err_msg):
        PolynomialFeatures(**params).fit(X)


@pytest.fixture()
def single_feature_degree3():
    X = np.arange(6)[:, np.newaxis]
    P = np.hstack([np.ones_like(X), X, X**2, X**3])
    return X, P


@pytest.mark.parametrize(
    "degree, include_bias, interaction_only, indices",
    [
        (3, True, False, slice(None, None)),
        (3, False, False, slice(1, None)),
        (3, True, True, [0, 1]),
        (3, False, True, [1]),
        ((2, 3), True, False, [0, 2, 3]),
        ((2, 3), False, False, [2, 3]),
        ((2, 3), True, True, [0]),
        ((2, 3), False, True, []),
    ],
)
@pytest.mark.parametrize("X_container", [None] + CSR_CONTAINERS + CSC_CONTAINERS)
def test_polynomial_features_one_feature(
    single_feature_degree3,
    degree,
    include_bias,
    interaction_only,
    indices,
    X_container,
):
    """Test PolynomialFeatures on single feature up to degree 3."""
    X, P = single_feature_degree3
    if X_container is not None:
        X = X_container(X)
    tf = PolynomialFeatures(
        degree=degree, include_bias=include_bias, interaction_only=interaction_only
    ).fit(X)
    out = tf.transform(X)
    if X_container is not None:
        out = out.toarray()
    assert_allclose(out, P[:, indices])
    if tf.n_output_features_ > 0:
        assert tf.powers_.shape == (tf.n_output_features_, tf.n_features_in_)


@pytest.fixture()
def two_features_degree3():
    X = np.arange(6).reshape((3, 2))
    x1 = X[:, :1]
    x2 = X[:, 1:]
    P = np.hstack(
        [
            x1**0 * x2**0,  # 0
            x1**1 * x2**0,  # 1
            x1**0 * x2**1,  # 2
            x1**2 * x2**0,  # 3
            x1**1 * x2**1,  # 4
            x1**0 * x2**2,  # 5
            x1**3 * x2**0,  # 6
            x1**2 * x2**1,  # 7
            x1**1 * x2**2,  # 8
            x1**0 * x2**3,  # 9
        ]
    )
    return X, P


@pytest.mark.parametrize(
    "degree, include_bias, interaction_only, indices",
    [
        (2, True, False, slice(0, 6)),
        (2, False, False, slice(1, 6)),
        (2, True, True, [0, 1, 2, 4]),
        (2, False, True, [1, 2, 4]),
        ((2, 2), True, False, [0, 3, 4, 5]),
        ((2, 2), False, False, [3, 4, 5]),
        ((2, 2), True, True, [0, 4]),
        ((2, 2), False, True, [4]),
        (3, True, False, slice(None, None)),
        (3, False, False, slice(1, None)),
        (3, True, True, [0, 1, 2, 4]),
        (3, False, True, [1, 2, 4]),
        ((2, 3), True, False, [0, 3, 4, 5, 6, 7, 8, 9]),
        ((2, 3), False, False, slice(3, None)),
        ((2, 3), True, True, [0, 4]),
        ((2, 3), False, True, [4]),
        ((3, 3), True, False, [0, 6, 7, 8, 9]),
        ((3, 3), False, False, [6, 7, 8, 9]),
        ((3, 3), True, True, [0]),
        ((3, 3), False, True, []),  # would need 3 input features
    ],
)
@pytest.mark.parametrize("X_container", [None] + CSR_CONTAINERS + CSC_CONTAINERS)
def test_polynomial_features_two_features(
    two_features_degree3,
    degree,
    include_bias,
    interaction_only,
    indices,
    X_container,
):
    """Test PolynomialFeatures on 2 features up to degree 3."""
    X, P = two_features_degree3
    if X_container is not None:
        X = X_container(X)
    tf = PolynomialFeatures(
        degree=degree, include_bias=include_bias, interaction_only=interaction_only
    ).fit(X)
    out = tf.transform(X)
    if X_container is not None:
        out = out.toarray()
    assert_allclose(out, P[:, indices])
    if tf.n_output_features_ > 0:
        assert tf.powers_.shape == (tf.n_output_features_, tf.n_features_in_)


def test_polynomial_feature_names():
    X = np.arange(30).reshape(10, 3)
    poly = PolynomialFeatures(degree=2, include_bias=True).fit(X)
    feature_names = poly.get_feature_names_out()
    assert_array_equal(
        ["1", "x0", "x1", "x2", "x0^2", "x0 x1", "x0 x2", "x1^2", "x1 x2", "x2^2"],
        feature_names,
    )
    assert len(feature_names) == poly.transform(X).shape[1]

    poly = PolynomialFeatures(degree=3, include_bias=False).fit(X)
    feature_names = poly.get_feature_names_out(["a", "b", "c"])
    assert_array_equal(
        [
            "a",
            "b",
            "c",
            "a^2",
            "a b",
            "a c",
            "b^2",
            "b c",
            "c^2",
            "a^3",
            "a^2 b",
            "a^2 c",
            "a b^2",
            "a b c",
            "a c^2",
            "b^3",
            "b^2 c",
            "b c^2",
            "c^3",
        ],
        feature_names,
    )
    assert len(feature_names) == poly.transform(X).shape[1]

    poly = PolynomialFeatures(degree=(2, 3), include_bias=False).fit(X)
    feature_names = poly.get_feature_names_out(["a", "b", "c"])
    assert_array_equal(
        [
            "a^2",
            "a b",
            "a c",
            "b^2",
            "b c",
            "c^2",
            "a^3",
            "a^2 b",
            "a^2 c",
            "a b^2",
            "a b c",
            "a c^2",
            "b^3",
            "b^2 c",
            "b c^2",
            "c^3",
        ],
        feature_names,
    )
    assert len(feature_names) == poly.transform(X).shape[1]

    poly = PolynomialFeatures(
        degree=(3, 3), include_bias=True, interaction_only=True
    ).fit(X)
    feature_names = poly.get_feature_names_out(["a", "b", "c"])
    assert_array_equal(["1", "a b c"], feature_names)
    assert len(feature_names) == poly.transform(X).shape[1]

    # test some unicode
    poly = PolynomialFeatures(degree=1, include_bias=True).fit(X)
    feature_names = poly.get_feature_names_out(["\u0001F40D", "\u262e", "\u05d0"])
    assert_array_equal(["1", "\u0001F40D", "\u262e", "\u05d0"], feature_names)


@pytest.mark.parametrize(
    ["deg", "include_bias", "interaction_only", "dtype"],
    [
        (1, True, False, int),
        (2, True, False, int),
        (2, True, False, np.float32),
        (2, True, False, np.float64),
        (3, False, False, np.float64),
        (3, False, True, np.float64),
        (4, False, False, np.float64),
        (4, False, True, np.float64),
    ],
)
@pytest.mark.parametrize("csc_container", CSC_CONTAINERS)
def test_polynomial_features_csc_X(
    deg, include_bias, interaction_only, dtype, csc_container
):
    rng = np.random.RandomState(0)
    X = rng.randint(0, 2, (100, 2))
    X_csc = csc_container(X)

    est = PolynomialFeatures(
        deg, include_bias=include_bias, interaction_only=interaction_only
    )
    Xt_csc = est.fit_transform(X_csc.astype(dtype))
    Xt_dense = est.fit_transform(X.astype(dtype))

    assert sparse.issparse(Xt_csc) and Xt_csc.format == "csc"
    assert Xt_csc.dtype == Xt_dense.dtype
    assert_array_almost_equal(Xt_csc.toarray(), Xt_dense)


@pytest.mark.parametrize(
    ["deg", "include_bias", "interaction_only", "dtype"],
    [
        (1, True, False, int),
        (2, True, False, int),
        (2, True, False, np.float32),
        (2, True, False, np.float64),
        (3, False, False, np.float64),
        (3, False, True, np.float64),
    ],
)
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_polynomial_features_csr_X(
    deg, include_bias, interaction_only, dtype, csr_container
):
    rng = np.random.RandomState(0)
    X = rng.randint(0, 2, (100, 2))
    X_csr = csr_container(X)

    est = PolynomialFeatures(
        deg, include_bias=include_bias, interaction_only=interaction_only
    )
    Xt_csr = est.fit_transform(X_csr.astype(dtype))
    Xt_dense = est.fit_transform(X.astype(dtype, copy=False))

    assert sparse.issparse(Xt_csr) and Xt_csr.format == "csr"
    assert Xt_csr.dtype == Xt_dense.dtype
    assert_array_almost_equal(Xt_csr.toarray(), Xt_dense)


@pytest.mark.parametrize("n_features", [1, 4, 5])
@pytest.mark.parametrize(
    "min_degree, max_degree", [(0, 1), (0, 2), (1, 3), (0, 4), (3, 4)]
)
@pytest.mark.parametrize("interaction_only", [True, False])
@pytest.mark.parametrize("include_bias", [True, False])
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_num_combinations(
    n_features, min_degree, max_degree, interaction_only, include_bias, csr_container
):
    """
    Test that n_output_features_ is calculated correctly.
    """
    x = csr_container(([1], ([0], [n_features - 1])))
    est = PolynomialFeatures(
        degree=max_degree,
        interaction_only=interaction_only,
        include_bias=include_bias,
    )
    est.fit(x)
    num_combos = est.n_output_features_

    combos = PolynomialFeatures._combinations(
        n_features=n_features,
        min_degree=0,
        max_degree=max_degree,
        interaction_only=interaction_only,
        include_bias=include_bias,
    )
    assert num_combos == sum([1 for _ in combos])
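
For the full expansion (include_bias=True, interaction_only=False) the count that `n_output_features_` must match has a stars-and-bars closed form; a quick standard-library cross-check, illustrative only and not part of the diffed file:

from math import comb

def n_poly_features(n_features, max_degree):
    # Monomials of total degree 0..max_degree in n_features variables:
    # C(n_features + max_degree, max_degree).
    return comb(n_features + max_degree, max_degree)

print(n_poly_features(2, 2))  # 6: 1, x0, x1, x0^2, x0 x1, x1^2
print(n_poly_features(3, 3))  # 20, as in test_polynomial_feature_names above
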
@pytest.mark.parametrize(
    ["deg", "include_bias", "interaction_only", "dtype"],
    [
        (2, True, False, np.float32),
        (2, True, False, np.float64),
        (3, False, False, np.float64),
        (3, False, True, np.float64),
    ],
)
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_polynomial_features_csr_X_floats(
    deg, include_bias, interaction_only, dtype, csr_container
):
    X_csr = csr_container(sparse_random(1000, 10, 0.5, random_state=0))
    X = X_csr.toarray()

    est = PolynomialFeatures(
        deg, include_bias=include_bias, interaction_only=interaction_only
    )
    Xt_csr = est.fit_transform(X_csr.astype(dtype))
    Xt_dense = est.fit_transform(X.astype(dtype))

    assert sparse.issparse(Xt_csr) and Xt_csr.format == "csr"
    assert Xt_csr.dtype == Xt_dense.dtype
    assert_array_almost_equal(Xt_csr.toarray(), Xt_dense)


@pytest.mark.parametrize(
    ["zero_row_index", "deg", "interaction_only"],
    [
        (0, 2, True),
        (1, 2, True),
        (2, 2, True),
        (0, 3, True),
        (1, 3, True),
        (2, 3, True),
        (0, 2, False),
        (1, 2, False),
        (2, 2, False),
        (0, 3, False),
        (1, 3, False),
        (2, 3, False),
    ],
)
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_polynomial_features_csr_X_zero_row(
    zero_row_index, deg, interaction_only, csr_container
):
    X_csr = csr_container(sparse_random(3, 10, 1.0, random_state=0))
    X_csr[zero_row_index, :] = 0.0
    X = X_csr.toarray()

    est = PolynomialFeatures(deg, include_bias=False, interaction_only=interaction_only)
    Xt_csr = est.fit_transform(X_csr)
    Xt_dense = est.fit_transform(X)

    assert sparse.issparse(Xt_csr) and Xt_csr.format == "csr"
    assert Xt_csr.dtype == Xt_dense.dtype
    assert_array_almost_equal(Xt_csr.toarray(), Xt_dense)


# This degree should always be one more than the highest degree supported by
# _csr_expansion.
@pytest.mark.parametrize(
    ["include_bias", "interaction_only"],
    [(True, True), (True, False), (False, True), (False, False)],
)
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_polynomial_features_csr_X_degree_4(
    include_bias, interaction_only, csr_container
):
    X_csr = csr_container(sparse_random(1000, 10, 0.5, random_state=0))
    X = X_csr.toarray()

    est = PolynomialFeatures(
        4, include_bias=include_bias, interaction_only=interaction_only
    )
    Xt_csr = est.fit_transform(X_csr)
    Xt_dense = est.fit_transform(X)

    assert sparse.issparse(Xt_csr) and Xt_csr.format == "csr"
    assert Xt_csr.dtype == Xt_dense.dtype
    assert_array_almost_equal(Xt_csr.toarray(), Xt_dense)


@pytest.mark.parametrize(
    ["deg", "dim", "interaction_only"],
    [
        (2, 1, True),
        (2, 2, True),
        (3, 1, True),
        (3, 2, True),
        (3, 3, True),
        (2, 1, False),
        (2, 2, False),
        (3, 1, False),
        (3, 2, False),
        (3, 3, False),
    ],
)
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_polynomial_features_csr_X_dim_edges(deg, dim, interaction_only, csr_container):
    X_csr = csr_container(sparse_random(1000, dim, 0.5, random_state=0))
    X = X_csr.toarray()

    est = PolynomialFeatures(deg, interaction_only=interaction_only)
    Xt_csr = est.fit_transform(X_csr)
    Xt_dense = est.fit_transform(X)

    assert sparse.issparse(Xt_csr) and Xt_csr.format == "csr"
    assert Xt_csr.dtype == Xt_dense.dtype
    assert_array_almost_equal(Xt_csr.toarray(), Xt_dense)


@pytest.mark.parametrize("interaction_only", [True, False])
@pytest.mark.parametrize("include_bias", [True, False])
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_csr_polynomial_expansion_index_overflow_non_regression(
    interaction_only, include_bias, csr_container
):
    """Check the automatic index dtype promotion to `np.int64` when needed.

    This ensures that sufficiently large input configurations get
    properly promoted to use `np.int64` for index and indptr representation
    while preserving data integrity. Non-regression test for gh-16803.

    Note that this is only possible for Python runtimes with a 64 bit address
    space. On 32 bit platforms, a `ValueError` is raised instead.
    """

    def degree_2_calc(d, i, j):
        if interaction_only:
            return d * i - (i**2 + 3 * i) // 2 - 1 + j
        else:
            return d * i - (i**2 + i) // 2 + j

    n_samples = 13
    n_features = 120001
    data_dtype = np.float32
    data = np.arange(1, 5, dtype=np.int64)
    row = np.array([n_samples - 2, n_samples - 2, n_samples - 1, n_samples - 1])
    # An int64 dtype is required to avoid overflow error on Windows within the
    # `degree_2_calc` function.
    col = np.array(
        [n_features - 2, n_features - 1, n_features - 2, n_features - 1], dtype=np.int64
    )
    X = csr_container(
        (data, (row, col)),
        shape=(n_samples, n_features),
        dtype=data_dtype,
    )
    pf = PolynomialFeatures(
        interaction_only=interaction_only, include_bias=include_bias, degree=2
    )

    # Calculate the number of combinations a-priori, and if needed check for
    # the correct ValueError and terminate the test early.
    num_combinations = pf._num_combinations(
        n_features=n_features,
        min_degree=0,
        max_degree=2,
        interaction_only=pf.interaction_only,
        include_bias=pf.include_bias,
    )
    if num_combinations > np.iinfo(np.intp).max:
        msg = (
            r"The output that would result from the current configuration would have"
            r" \d* features which is too large to be indexed"
        )
        with pytest.raises(ValueError, match=msg):
            pf.fit(X)
        return
    X_trans = pf.fit_transform(X)
    row_nonzero, col_nonzero = X_trans.nonzero()
    n_degree_1_features_out = n_features + include_bias
    max_degree_2_idx = (
        degree_2_calc(n_features, col[int(not interaction_only)], col[1])
        + n_degree_1_features_out
    )

    # Account for bias of all samples except last one which will be handled
    # separately since there are distinct data values before it
    data_target = [1] * (n_samples - 2) if include_bias else []
    col_nonzero_target = [0] * (n_samples - 2) if include_bias else []

    for i in range(2):
        x = data[2 * i]
        y = data[2 * i + 1]
        x_idx = col[2 * i]
        y_idx = col[2 * i + 1]
        if include_bias:
            data_target.append(1)
            col_nonzero_target.append(0)
        data_target.extend([x, y])
        col_nonzero_target.extend(
            [x_idx + int(include_bias), y_idx + int(include_bias)]
        )
        if not interaction_only:
            data_target.extend([x * x, x * y, y * y])
            col_nonzero_target.extend(
                [
                    degree_2_calc(n_features, x_idx, x_idx) + n_degree_1_features_out,
                    degree_2_calc(n_features, x_idx, y_idx) + n_degree_1_features_out,
                    degree_2_calc(n_features, y_idx, y_idx) + n_degree_1_features_out,
                ]
            )
        else:
            data_target.extend([x * y])
            col_nonzero_target.append(
                degree_2_calc(n_features, x_idx, y_idx) + n_degree_1_features_out
            )

    nnz_per_row = int(include_bias) + 3 + 2 * int(not interaction_only)

    assert pf.n_output_features_ == max_degree_2_idx + 1
    assert X_trans.dtype == data_dtype
    assert X_trans.shape == (n_samples, max_degree_2_idx + 1)
    assert X_trans.indptr.dtype == X_trans.indices.dtype == np.int64
    # Ensure that dtype promotion was actually required:
    assert X_trans.indices.max() > np.iinfo(np.int32).max

    row_nonzero_target = list(range(n_samples - 2)) if include_bias else []
    row_nonzero_target.extend(
        [n_samples - 2] * nnz_per_row + [n_samples - 1] * nnz_per_row
    )

    assert_allclose(X_trans.data, data_target)
    assert_array_equal(row_nonzero, row_nonzero_target)
    assert_array_equal(col_nonzero, col_nonzero_target)
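
The promotion being tested mirrors scipy's own convention of storing sparse index arrays as int32 until some index no longer fits; a tiny probe of that dtype switch, illustrative only and not part of the diffed file (exact dtypes may vary across scipy versions):

import numpy as np
from scipy import sparse

small = sparse.random(10, 10, density=0.5, format="csr", random_state=0)
print(small.indices.dtype)  # int32: all indices fit comfortably

# A single entry whose column index exceeds the int32 range forces int64.
big = sparse.csr_matrix(([1.0], ([0], [np.iinfo(np.int32).max + 1])))
print(big.indices.dtype)  # int64
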
@pytest.mark.parametrize(
|
1027 |
+
"degree, n_features",
|
1028 |
+
[
|
1029 |
+
# Needs promotion to int64 when interaction_only=False
|
1030 |
+
(2, 65535),
|
1031 |
+
(3, 2344),
|
1032 |
+
# This guarantees that the intermediate operation when calculating
|
1033 |
+
# output columns would overflow a C-long, hence checks that python-
|
1034 |
+
# longs are being used.
|
1035 |
+
(2, int(np.sqrt(np.iinfo(np.int64).max) + 1)),
|
1036 |
+
(3, 65535),
|
1037 |
+
# This case tests the second clause of the overflow check which
|
1038 |
+
# takes into account the value of `n_features` itself.
|
1039 |
+
(2, int(np.sqrt(np.iinfo(np.int64).max))),
|
1040 |
+
],
|
1041 |
+
)
|
1042 |
+
@pytest.mark.parametrize("interaction_only", [True, False])
|
1043 |
+
@pytest.mark.parametrize("include_bias", [True, False])
|
1044 |
+
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
|
1045 |
+
def test_csr_polynomial_expansion_index_overflow(
|
1046 |
+
degree, n_features, interaction_only, include_bias, csr_container
|
1047 |
+
):
|
1048 |
+
"""Tests known edge-cases to the dtype promotion strategy and custom
|
1049 |
+
Cython code, including a current bug in the upstream
|
1050 |
+
`scipy.sparse.hstack`.
|
1051 |
+
"""
|
1052 |
+
data = [1.0]
|
1053 |
+
row = [0]
|
1054 |
+
col = [n_features - 1]
|
1055 |
+
|
1056 |
+
# First degree index
|
1057 |
+
expected_indices = [
|
1058 |
+
n_features - 1 + int(include_bias),
|
1059 |
+
]
|
1060 |
+
# Second degree index
|
1061 |
+
expected_indices.append(n_features * (n_features + 1) // 2 + expected_indices[0])
|
1062 |
+
# Third degree index
|
1063 |
+
expected_indices.append(
|
1064 |
+
n_features * (n_features + 1) * (n_features + 2) // 6 + expected_indices[1]
|
1065 |
+
)
|
1066 |
+
|
1067 |
+
X = csr_container((data, (row, col)))
|
1068 |
+
pf = PolynomialFeatures(
|
1069 |
+
interaction_only=interaction_only, include_bias=include_bias, degree=degree
|
1070 |
+
)
|
1071 |
+
|
1072 |
+
# Calculate the number of combinations a-priori, and if needed check for
|
1073 |
+
# the correct ValueError and terminate the test early.
|
1074 |
+
num_combinations = pf._num_combinations(
|
1075 |
+
n_features=n_features,
|
1076 |
+
min_degree=0,
|
1077 |
+
max_degree=degree,
|
1078 |
+
interaction_only=pf.interaction_only,
|
1079 |
+
include_bias=pf.include_bias,
|
1080 |
+
)
|
1081 |
+
if num_combinations > np.iinfo(np.intp).max:
|
1082 |
+
msg = (
|
1083 |
+
r"The output that would result from the current configuration would have"
|
1084 |
+
r" \d* features which is too large to be indexed"
|
1085 |
+
)
|
1086 |
+
with pytest.raises(ValueError, match=msg):
|
1087 |
+
pf.fit(X)
|
1088 |
+
return
|
1089 |
+
|
1090 |
+
# In SciPy < 1.8, a bug occurs when an intermediate matrix in
|
1091 |
+
# `to_stack` in `hstack` fits within int32 however would require int64 when
|
1092 |
+
# combined with all previous matrices in `to_stack`.
|
1093 |
+
if sp_version < parse_version("1.8.0"):
|
1094 |
+
has_bug = False
|
1095 |
+
max_int32 = np.iinfo(np.int32).max
|
1096 |
+
cumulative_size = n_features + include_bias
|
1097 |
+
for deg in range(2, degree + 1):
|
1098 |
+
max_indptr = _calc_total_nnz(X.indptr, interaction_only, deg)
|
1099 |
+
max_indices = _calc_expanded_nnz(n_features, interaction_only, deg) - 1
|
1100 |
+
cumulative_size += max_indices + 1
|
1101 |
+
needs_int64 = max(max_indices, max_indptr) > max_int32
|
1102 |
+
has_bug |= not needs_int64 and cumulative_size > max_int32
|
1103 |
+
if has_bug:
|
1104 |
+
msg = r"In scipy versions `<1.8.0`, the function `scipy.sparse.hstack`"
|
1105 |
+
with pytest.raises(ValueError, match=msg):
|
1106 |
+
X_trans = pf.fit_transform(X)
|
1107 |
+
return
|
1108 |
+
|
1109 |
+
# When `n_features>=65535`, `scipy.sparse.hstack` may not use the right
|
1110 |
+
# dtype for representing indices and indptr if `n_features` is still
|
1111 |
+
# small enough so that each block matrix's indices and indptr arrays
|
1112 |
+
# can be represented with `np.int32`. We test `n_features==65535`
|
1113 |
+
# since it is guaranteed to run into this bug.
|
1114 |
+
if (
|
1115 |
+
sp_version < parse_version("1.9.2")
|
1116 |
+
and n_features == 65535
|
1117 |
+
and degree == 2
|
1118 |
+
and not interaction_only
|
1119 |
+
): # pragma: no cover
|
1120 |
+
msg = r"In scipy versions `<1.9.2`, the function `scipy.sparse.hstack`"
|
1121 |
+
with pytest.raises(ValueError, match=msg):
|
1122 |
+
X_trans = pf.fit_transform(X)
|
1123 |
+
return
|
1124 |
+
X_trans = pf.fit_transform(X)
|
1125 |
+
|
1126 |
+
expected_dtype = np.int64 if num_combinations > np.iinfo(np.int32).max else np.int32
|
1127 |
+
# Terms higher than first degree
|
1128 |
+
non_bias_terms = 1 + (degree - 1) * int(not interaction_only)
|
1129 |
+
expected_nnz = int(include_bias) + non_bias_terms
|
1130 |
+
assert X_trans.dtype == X.dtype
|
1131 |
+
assert X_trans.shape == (1, pf.n_output_features_)
|
1132 |
+
assert X_trans.indptr.dtype == X_trans.indices.dtype == expected_dtype
|
1133 |
+
assert X_trans.nnz == expected_nnz
|
1134 |
+
|
1135 |
+
if include_bias:
|
1136 |
+
assert X_trans[0, 0] == pytest.approx(1.0)
|
1137 |
+
for idx in range(non_bias_terms):
|
1138 |
+
assert X_trans[0, expected_indices[idx]] == pytest.approx(1.0)
|
1139 |
+
|
1140 |
+
offset = interaction_only * n_features
|
1141 |
+
if degree == 3:
|
1142 |
+
offset *= 1 + n_features
|
1143 |
+
assert pf.n_output_features_ == expected_indices[degree - 1] + 1 - offset
|
1144 |
+
|
1145 |
+
|
1146 |
+
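For reference, a minimal standalone sketch (not part of the test file; it only assumes numpy and scikit-learn) of the combinatorics the assertions above rely on: with include_bias=True and interaction_only=False, a degree-d expansion of n features has comb(n + d, d) output columns, which is where the n*(n+1)//2 and n*(n+1)*(n+2)//6 index offsets come from.

# Standalone sketch: the total number of monomials of degree <= d in n
# variables (bias included) is comb(n + d, d); PolynomialFeatures exposes
# this count as n_output_features_.
from math import comb

import numpy as np
from sklearn.preprocessing import PolynomialFeatures

n_features, degree = 4, 3
X = np.ones((1, n_features))
pf = PolynomialFeatures(degree=degree, include_bias=True).fit(X)
assert pf.n_output_features_ == comb(n_features + degree, degree)  # 35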
@pytest.mark.parametrize("interaction_only", [True, False])
@pytest.mark.parametrize("include_bias", [True, False])
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_csr_polynomial_expansion_too_large_to_index(
    interaction_only, include_bias, csr_container
):
    n_features = np.iinfo(np.int64).max // 2
    data = [1.0]
    row = [0]
    col = [n_features - 1]
    X = csr_container((data, (row, col)))
    pf = PolynomialFeatures(
        interaction_only=interaction_only, include_bias=include_bias, degree=(2, 2)
    )
    msg = (
        r"The output that would result from the current configuration would have \d*"
        r" features which is too large to be indexed"
    )
    with pytest.raises(ValueError, match=msg):
        pf.fit(X)
    with pytest.raises(ValueError, match=msg):
        pf.fit_transform(X)
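The failure above is purely arithmetic; a rough sketch, assuming nothing beyond numpy, of why no valid index space can exist for this configuration:

# With n ~ 2**62 input columns, the count of degree-2 monomials alone,
# n * (n + 1) // 2, far exceeds the largest int64 value, so the expansion
# can never be indexed and fit() must raise instead.
import numpy as np

n = np.iinfo(np.int64).max // 2
assert n * (n + 1) // 2 > np.iinfo(np.int64).max  # Python ints never overflow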
@pytest.mark.parametrize("sparse_container", CSR_CONTAINERS + CSC_CONTAINERS)
def test_polynomial_features_behaviour_on_zero_degree(sparse_container):
    """Check that PolynomialFeatures raises an error when degree=0 and
    include_bias=False, and outputs a single constant column when
    include_bias=True.
    """
    X = np.ones((10, 2))
    poly = PolynomialFeatures(degree=0, include_bias=False)
    err_msg = (
        "Setting degree to zero and include_bias to False would result in"
        " an empty output array."
    )
    with pytest.raises(ValueError, match=err_msg):
        poly.fit_transform(X)

    poly = PolynomialFeatures(degree=(0, 0), include_bias=False)
    err_msg = (
        "Setting both min_degree and max_degree to zero and include_bias to"
        " False would result in an empty output array."
    )
    with pytest.raises(ValueError, match=err_msg):
        poly.fit_transform(X)

    for _X in [X, sparse_container(X)]:
        poly = PolynomialFeatures(degree=0, include_bias=True)
        output = poly.fit_transform(_X)
        # convert to a dense array if needed
        if sparse.issparse(output):
            output = output.toarray()
        assert_array_equal(output, np.ones((X.shape[0], 1)))
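A quick illustration of the degree-0 behaviour checked above (a sketch, not part of the suite): with include_bias=True the transformer collapses to a single constant column.

import numpy as np
from sklearn.preprocessing import PolynomialFeatures

X = np.arange(6, dtype=float).reshape(3, 2)
# Only the bias term survives at degree 0: one column of ones.
print(PolynomialFeatures(degree=0, include_bias=True).fit_transform(X))
# [[1.] [1.] [1.]]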
def test_sizeof_LARGEST_INT_t():
    # On Windows, scikit-learn is typically compiled with MSVC that
    # does not support int128 arithmetic (at the time of writing):
    # https://stackoverflow.com/a/6761962/163740
    if sys.platform == "win32" or (
        sys.maxsize <= 2**32 and sys.platform != "emscripten"
    ):
        expected_size = 8
    else:
        expected_size = 16

    assert _get_sizeof_LARGEST_INT_t() == expected_size
@pytest.mark.xfail(
    sys.platform == "win32",
    reason=(
        "On Windows, scikit-learn is typically compiled with MSVC that does not support"
        " int128 arithmetic (at the time of writing)"
    ),
    run=True,
)
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_csr_polynomial_expansion_windows_fail(csr_container):
    # Minimum needed to ensure integer overflow occurs while guaranteeing an
    # int64-indexable output.
    n_features = int(np.iinfo(np.int64).max ** (1 / 3) + 3)
    data = [1.0]
    row = [0]
    col = [n_features - 1]

    # First degree index
    expected_indices = [
        n_features - 1,
    ]
    # Second degree index
    expected_indices.append(
        int(n_features * (n_features + 1) // 2 + expected_indices[0])
    )
    # Third degree index
    expected_indices.append(
        int(n_features * (n_features + 1) * (n_features + 2) // 6 + expected_indices[1])
    )

    X = csr_container((data, (row, col)))
    pf = PolynomialFeatures(interaction_only=False, include_bias=False, degree=3)
    if sys.maxsize <= 2**32:
        msg = (
            r"The output that would result from the current configuration would"
            r" have \d*"
            r" features which is too large to be indexed"
        )
        with pytest.raises(ValueError, match=msg):
            pf.fit_transform(X)
    else:
        X_trans = pf.fit_transform(X)
        for idx in range(3):
            assert X_trans[0, expected_indices[idx]] == pytest.approx(1.0)
env-llmeval/lib/python3.10/site-packages/sklearn/preprocessing/tests/test_target_encoder.py
ADDED
@@ -0,0 +1,716 @@
import re

import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_array_equal

from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import Ridge
from sklearn.model_selection import (
    KFold,
    ShuffleSplit,
    StratifiedKFold,
    cross_val_score,
    train_test_split,
)
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import (
    KBinsDiscretizer,
    LabelBinarizer,
    LabelEncoder,
    TargetEncoder,
)
def _encode_target(X_ordinal, y_numeric, n_categories, smooth):
    """Simple Python implementation of target encoding."""
    cur_encodings = np.zeros(n_categories, dtype=np.float64)
    y_mean = np.mean(y_numeric)

    if smooth == "auto":
        y_variance = np.var(y_numeric)
        for c in range(n_categories):
            y_subset = y_numeric[X_ordinal == c]
            n_i = y_subset.shape[0]

            if n_i == 0:
                cur_encodings[c] = y_mean
                continue

            y_subset_variance = np.var(y_subset)
            m = y_subset_variance / y_variance
            lambda_ = n_i / (n_i + m)

            cur_encodings[c] = lambda_ * np.mean(y_subset) + (1 - lambda_) * y_mean
        return cur_encodings
    else:  # float
        for c in range(n_categories):
            y_subset = y_numeric[X_ordinal == c]
            current_sum = np.sum(y_subset) + y_mean * smooth
            current_cnt = y_subset.shape[0] + smooth
            cur_encodings[c] = current_sum / current_cnt
        return cur_encodings
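A tiny worked example of the "auto" (empirical Bayes) shrinkage implemented above, assuming only numpy: each category mean is pulled toward the global mean, more strongly when the category is small or its within-category variance is large.

import numpy as np

y = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 0.0])
x = np.array([0, 0, 0, 1, 1, 1])  # two categories

y_mean, y_var = y.mean(), y.var()
for c in (0, 1):
    y_c = y[x == c]
    m = y_c.var() / y_var            # noise-to-signal ratio
    lam = len(y_c) / (len(y_c) + m)  # shrinkage weight
    print(c, lam * y_c.mean() + (1 - lam) * y_mean)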
@pytest.mark.parametrize(
    "categories, unknown_value",
    [
        ([np.array([0, 1, 2], dtype=np.int64)], 4),
        ([np.array([1.0, 3.0, np.nan], dtype=np.float64)], 6.0),
        ([np.array(["cat", "dog", "snake"], dtype=object)], "bear"),
        ("auto", 3),
    ],
)
@pytest.mark.parametrize("smooth", [5.0, "auto"])
@pytest.mark.parametrize("target_type", ["binary", "continuous"])
def test_encoding(categories, unknown_value, global_random_seed, smooth, target_type):
    """Check encoding for binary and continuous targets.

    Compare the values returned by `TargetEncoder.fit_transform` against the
    encodings expected on each CV split, computed with the naive reference
    Python implementation `_encode_target`.
    """

    n_categories = 3
    X_train_int_array = np.array([[0] * 20 + [1] * 30 + [2] * 40], dtype=np.int64).T
    X_test_int_array = np.array([[0, 1, 2]], dtype=np.int64).T
    n_samples = X_train_int_array.shape[0]

    if categories == "auto":
        X_train = X_train_int_array
        X_test = X_test_int_array
    else:
        X_train = categories[0][X_train_int_array]
        X_test = categories[0][X_test_int_array]

    X_test = np.concatenate((X_test, [[unknown_value]]))

    data_rng = np.random.RandomState(global_random_seed)
    n_splits = 3
    if target_type == "binary":
        y_numeric = data_rng.randint(low=0, high=2, size=n_samples)
        target_names = np.array(["cat", "dog"], dtype=object)
        y_train = target_names[y_numeric]
    else:
        assert target_type == "continuous"
        y_numeric = data_rng.uniform(low=-10, high=20, size=n_samples)
        y_train = y_numeric

    shuffled_idx = data_rng.permutation(n_samples)
    X_train_int_array = X_train_int_array[shuffled_idx]
    X_train = X_train[shuffled_idx]
    y_train = y_train[shuffled_idx]
    y_numeric = y_numeric[shuffled_idx]

    # Define our CV splitting strategy
    if target_type == "binary":
        cv = StratifiedKFold(
            n_splits=n_splits, random_state=global_random_seed, shuffle=True
        )
    else:
        cv = KFold(n_splits=n_splits, random_state=global_random_seed, shuffle=True)

    # Compute the expected values using our reference Python implementation of
    # target encoding:
    expected_X_fit_transform = np.empty_like(X_train_int_array, dtype=np.float64)

    for train_idx, test_idx in cv.split(X_train_int_array, y_train):
        X_, y_ = X_train_int_array[train_idx, 0], y_numeric[train_idx]
        cur_encodings = _encode_target(X_, y_, n_categories, smooth)
        expected_X_fit_transform[test_idx, 0] = cur_encodings[
            X_train_int_array[test_idx, 0]
        ]

    # Check that we can obtain the same encodings by calling `fit_transform` on
    # the estimator with the same CV parameters:
    target_encoder = TargetEncoder(
        smooth=smooth,
        categories=categories,
        cv=n_splits,
        random_state=global_random_seed,
    )

    X_fit_transform = target_encoder.fit_transform(X_train, y_train)

    assert target_encoder.target_type_ == target_type
    assert_allclose(X_fit_transform, expected_X_fit_transform)
    assert len(target_encoder.encodings_) == 1
    if target_type == "binary":
        assert_array_equal(target_encoder.classes_, target_names)
    else:
        assert target_encoder.classes_ is None

    # Compute encodings for all data to validate `transform`
    y_mean = np.mean(y_numeric)
    expected_encodings = _encode_target(
        X_train_int_array[:, 0], y_numeric, n_categories, smooth
    )
    assert_allclose(target_encoder.encodings_[0], expected_encodings)
    assert target_encoder.target_mean_ == pytest.approx(y_mean)

    # Transform on test data; the last value is unknown, so it is encoded as
    # the target mean
    expected_X_test_transform = np.concatenate(
        (expected_encodings, np.array([y_mean]))
    ).reshape(-1, 1)

    X_test_transform = target_encoder.transform(X_test)
    assert_allclose(X_test_transform, expected_X_test_transform)
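For orientation, a minimal usage sketch of the behaviour this test pins down (illustrative only): on training data, fit_transform uses out-of-fold statistics (cross fitting), while transform applies the full-data encodings_, so the two outputs generally differ.

import numpy as np
from sklearn.preprocessing import TargetEncoder

rng = np.random.RandomState(0)
X = rng.randint(0, 3, size=(60, 1))
y = rng.normal(size=60)

enc = TargetEncoder(smooth="auto", cv=3, random_state=0)
X_cross_fitted = enc.fit_transform(X, y)  # out-of-fold encodings
X_full_data = enc.transform(X)            # encodings_ fitted on all rows
print(np.abs(X_cross_fitted - X_full_data).max())  # typically non-zero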
@pytest.mark.parametrize(
    "categories, unknown_values",
    [
        ([np.array([0, 1, 2], dtype=np.int64)], "auto"),
        ([np.array(["cat", "dog", "snake"], dtype=object)], ["bear", "rabbit"]),
    ],
)
@pytest.mark.parametrize(
    "target_labels", [np.array([1, 2, 3]), np.array(["a", "b", "c"])]
)
@pytest.mark.parametrize("smooth", [5.0, "auto"])
def test_encoding_multiclass(
    global_random_seed, categories, unknown_values, target_labels, smooth
):
    """Check encoding for multiclass targets."""
    rng = np.random.RandomState(global_random_seed)

    n_samples = 80
    n_features = 2
    feat_1_int = np.array(rng.randint(low=0, high=2, size=n_samples))
    feat_2_int = np.array(rng.randint(low=0, high=3, size=n_samples))
    feat_1 = categories[0][feat_1_int]
    feat_2 = categories[0][feat_2_int]
    X_train = np.column_stack((feat_1, feat_2))
    X_train_int = np.column_stack((feat_1_int, feat_2_int))
    categories_ = [[0, 1], [0, 1, 2]]

    n_classes = 3
    y_train_int = np.array(rng.randint(low=0, high=n_classes, size=n_samples))
    y_train = target_labels[y_train_int]
    y_train_enc = LabelBinarizer().fit_transform(y_train)

    n_splits = 3
    cv = StratifiedKFold(
        n_splits=n_splits, random_state=global_random_seed, shuffle=True
    )

    # Manually compute encodings for cv splits to validate `fit_transform`
    expected_X_fit_transform = np.empty(
        (X_train_int.shape[0], X_train_int.shape[1] * n_classes),
        dtype=np.float64,
    )
    for f_idx, cats in enumerate(categories_):
        for c_idx in range(n_classes):
            for train_idx, test_idx in cv.split(X_train, y_train):
                y_class = y_train_enc[:, c_idx]
                X_, y_ = X_train_int[train_idx, f_idx], y_class[train_idx]
                current_encoding = _encode_target(X_, y_, len(cats), smooth)
                # f_idx:   0, 0, 0, 1, 1, 1
                # c_idx:   0, 1, 2, 0, 1, 2
                # exp_idx: 0, 1, 2, 3, 4, 5
                exp_idx = c_idx + (f_idx * n_classes)
                expected_X_fit_transform[test_idx, exp_idx] = current_encoding[
                    X_train_int[test_idx, f_idx]
                ]

    target_encoder = TargetEncoder(
        smooth=smooth,
        cv=n_splits,
        random_state=global_random_seed,
    )
    X_fit_transform = target_encoder.fit_transform(X_train, y_train)

    assert target_encoder.target_type_ == "multiclass"
    assert_allclose(X_fit_transform, expected_X_fit_transform)

    # Manually compute the encoding to validate `transform`
    expected_encodings = []
    for f_idx, cats in enumerate(categories_):
        for c_idx in range(n_classes):
            y_class = y_train_enc[:, c_idx]
            current_encoding = _encode_target(
                X_train_int[:, f_idx], y_class, len(cats), smooth
            )
            expected_encodings.append(current_encoding)

    assert len(target_encoder.encodings_) == n_features * n_classes
    for i in range(n_features * n_classes):
        assert_allclose(target_encoder.encodings_[i], expected_encodings[i])
    assert_array_equal(target_encoder.classes_, target_labels)

    # Include unknown values at the end
    X_test_int = np.array([[0, 1], [1, 2], [4, 5]])
    if unknown_values == "auto":
        X_test = X_test_int
    else:
        X_test = np.empty_like(X_test_int[:-1, :], dtype=object)
        for column_idx in range(X_test_int.shape[1]):
            X_test[:, column_idx] = categories[0][X_test_int[:-1, column_idx]]
        # Add unknown values at the end
        X_test = np.vstack((X_test, unknown_values))

    y_mean = np.mean(y_train_enc, axis=0)
    expected_X_test_transform = np.empty(
        (X_test_int.shape[0], X_test_int.shape[1] * n_classes),
        dtype=np.float64,
    )
    n_rows = X_test_int.shape[0]
    f_idx = [0, 0, 0, 1, 1, 1]
    # The last row contains unknowns, dealt with later
    for row_idx in range(n_rows - 1):
        for i, enc in enumerate(expected_encodings):
            expected_X_test_transform[row_idx, i] = enc[X_test_int[row_idx, f_idx[i]]]

    # Unknowns are encoded as the target mean for each class.
    # `y_mean` contains the target mean for each class, thus cycle through the
    # mean of each class, `n_features` times
    mean_idx = [0, 1, 2, 0, 1, 2]
    for i in range(n_classes * n_features):
        expected_X_test_transform[n_rows - 1, i] = y_mean[mean_idx[i]]

    X_test_transform = target_encoder.transform(X_test)
    assert_allclose(X_test_transform, expected_X_test_transform)
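The exp_idx bookkeeping above encodes the multiclass output layout; a one-liner sketch of that feature-major ordering (illustrative, using the same n_features=2 and n_classes=3):

n_features, n_classes = 2, 3
# Output column for (feature f, class c) is f * n_classes + c.
layout = [(f, c) for f in range(n_features) for c in range(n_classes)]
print(layout)  # [(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2)]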
@pytest.mark.parametrize(
    "X, categories",
    [
        (
            np.array([[0] * 10 + [1] * 10 + [3]], dtype=np.int64).T,  # 3 is unknown
            [[0, 1, 2]],
        ),
        (
            np.array(
                [["cat"] * 10 + ["dog"] * 10 + ["snake"]], dtype=object
            ).T,  # snake is unknown
            [["dog", "cat", "cow"]],
        ),
    ],
)
@pytest.mark.parametrize("smooth", [4.0, "auto"])
def test_custom_categories(X, categories, smooth):
    """Check custom categories that include values absent from the training data."""
    rng = np.random.RandomState(0)
    y = rng.uniform(low=-10, high=20, size=X.shape[0])
    enc = TargetEncoder(categories=categories, smooth=smooth, random_state=0).fit(X, y)

    # The last element is unknown and encoded as the mean
    y_mean = y.mean()
    X_trans = enc.transform(X[-1:])
    assert X_trans[0, 0] == pytest.approx(y_mean)

    assert len(enc.encodings_) == 1
    # custom category that is not in the training data
    assert enc.encodings_[0][-1] == pytest.approx(y_mean)
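A short usage sketch of the `categories` parameter exercised above (illustrative values only): a category listed explicitly but absent from the training data is encoded as the global target mean.

import numpy as np
from sklearn.preprocessing import TargetEncoder

X = np.array([["cat"], ["dog"], ["cat"], ["dog"]], dtype=object)
y = np.array([1.5, 3.5, 1.5, 3.5])  # non-integral floats -> continuous target
enc = TargetEncoder(categories=[["cat", "dog", "cow"]], smooth=4.0).fit(X, y)
# The listed-but-unseen "cow" entry falls back to y.mean() == 2.5.
print(enc.encodings_[0])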
@pytest.mark.parametrize(
    "y, msg",
    [
        ([1, 2, 0, 1], "Found input variables with inconsistent"),
        (
            np.array([[1, 2, 0], [1, 2, 3]]).T,
            "Target type was inferred to be 'multiclass-multioutput'",
        ),
    ],
)
def test_errors(y, msg):
    """Check invalid input."""
    X = np.array([[1, 0, 1]]).T

    enc = TargetEncoder()
    with pytest.raises(ValueError, match=msg):
        enc.fit_transform(X, y)
def test_use_regression_target():
    """Check inferred and specified `target_type` on a regression target."""
    X = np.array([[0, 1, 0, 1, 0, 1]]).T
    y = np.array([1.0, 2.0, 3.0, 2.0, 3.0, 4.0])

    enc = TargetEncoder(cv=2)
    with pytest.warns(
        UserWarning,
        match=re.escape(
            "The least populated class in y has only 1 members, which is less than"
            " n_splits=2."
        ),
    ):
        enc.fit_transform(X, y)
    assert enc.target_type_ == "multiclass"

    enc = TargetEncoder(cv=2, target_type="continuous")
    enc.fit_transform(X, y)
    assert enc.target_type_ == "continuous"
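Why the warning above fires: an all-float target whose values are integral is inferred as multiclass rather than continuous. A sketch with the helper scikit-learn uses for this inference:

import numpy as np
from sklearn.utils.multiclass import type_of_target

print(type_of_target(np.array([1.0, 2.0, 3.0, 2.0, 3.0, 4.0])))  # 'multiclass'
print(type_of_target(np.array([1.5, 2.0, 3.0, 2.0, 3.0, 4.0])))  # 'continuous'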
@pytest.mark.parametrize(
    "y, feature_names",
    [
        ([1, 2] * 10, ["A", "B"]),
        ([1, 2, 3] * 6 + [1, 2], ["A_1", "A_2", "A_3", "B_1", "B_2", "B_3"]),
        (
            ["y1", "y2", "y3"] * 6 + ["y1", "y2"],
            ["A_y1", "A_y2", "A_y3", "B_y1", "B_y2", "B_y3"],
        ),
    ],
)
def test_feature_names_out_set_output(y, feature_names):
    """Check that TargetEncoder works with set_output."""
    pd = pytest.importorskip("pandas")

    X_df = pd.DataFrame({"A": ["a", "b"] * 10, "B": [1, 2] * 10})

    enc_default = TargetEncoder(cv=2, smooth=3.0, random_state=0)
    enc_default.set_output(transform="default")
    enc_pandas = TargetEncoder(cv=2, smooth=3.0, random_state=0)
    enc_pandas.set_output(transform="pandas")

    X_default = enc_default.fit_transform(X_df, y)
    X_pandas = enc_pandas.fit_transform(X_df, y)

    assert_allclose(X_pandas.to_numpy(), X_default)
    assert_array_equal(enc_pandas.get_feature_names_out(), feature_names)
    assert_array_equal(enc_pandas.get_feature_names_out(), X_pandas.columns)
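A compact usage sketch of the set_output API covered above (assumes pandas is installed): with pandas output, the returned DataFrame's columns match get_feature_names_out().

import pandas as pd
from sklearn.preprocessing import TargetEncoder

X_df = pd.DataFrame({"A": ["a", "b"] * 10})
y = [1, 2] * 10  # binary target: one output column per input feature
enc = TargetEncoder(cv=2).set_output(transform="pandas")
out = enc.fit_transform(X_df, y)
print(list(out.columns))  # ['A']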
@pytest.mark.parametrize("to_pandas", [True, False])
@pytest.mark.parametrize("smooth", [1.0, "auto"])
@pytest.mark.parametrize("target_type", ["binary-ints", "binary-str", "continuous"])
def test_multiple_features_quick(to_pandas, smooth, target_type):
    """Check the target encoder with multiple features."""
    X_ordinal = np.array(
        [[1, 1], [0, 1], [1, 1], [2, 1], [1, 0], [0, 1], [1, 0], [0, 0]], dtype=np.int64
    )
    if target_type == "binary-str":
        y_train = np.array(["a", "b", "a", "a", "b", "b", "a", "b"])
        y_integer = LabelEncoder().fit_transform(y_train)
        cv = StratifiedKFold(2, random_state=0, shuffle=True)
    elif target_type == "binary-ints":
        y_train = np.array([3, 4, 3, 3, 3, 4, 4, 4])
        y_integer = LabelEncoder().fit_transform(y_train)
        cv = StratifiedKFold(2, random_state=0, shuffle=True)
    else:
        y_train = np.array([3.0, 5.1, 2.4, 3.5, 4.1, 5.5, 10.3, 7.3], dtype=np.float32)
        y_integer = y_train
        cv = KFold(2, random_state=0, shuffle=True)
    y_mean = np.mean(y_integer)
    categories = [[0, 1, 2], [0, 1]]

    X_test = np.array(
        [
            [0, 1],
            [3, 0],  # 3 is unknown
            [1, 10],  # 10 is unknown
        ],
        dtype=np.int64,
    )

    if to_pandas:
        pd = pytest.importorskip("pandas")
        # convert the second feature to an object dtype
        X_train = pd.DataFrame(
            {
                "feat0": X_ordinal[:, 0],
                "feat1": np.array(["cat", "dog"], dtype=object)[X_ordinal[:, 1]],
            }
        )
        # "snake" is unknown
        X_test = pd.DataFrame({"feat0": X_test[:, 0], "feat1": ["dog", "cat", "snake"]})
    else:
        X_train = X_ordinal

    # manually compute the encoding for fit_transform
    expected_X_fit_transform = np.empty_like(X_ordinal, dtype=np.float64)
    for f_idx, cats in enumerate(categories):
        for train_idx, test_idx in cv.split(X_ordinal, y_integer):
            X_, y_ = X_ordinal[train_idx, f_idx], y_integer[train_idx]
            current_encoding = _encode_target(X_, y_, len(cats), smooth)
            expected_X_fit_transform[test_idx, f_idx] = current_encoding[
                X_ordinal[test_idx, f_idx]
            ]

    # manually compute the encoding for transform
    expected_encodings = []
    for f_idx, cats in enumerate(categories):
        current_encoding = _encode_target(
            X_ordinal[:, f_idx], y_integer, len(cats), smooth
        )
        expected_encodings.append(current_encoding)

    expected_X_test_transform = np.array(
        [
            [expected_encodings[0][0], expected_encodings[1][1]],
            [y_mean, expected_encodings[1][0]],
            [expected_encodings[0][1], y_mean],
        ],
        dtype=np.float64,
    )

    enc = TargetEncoder(smooth=smooth, cv=2, random_state=0)
    X_fit_transform = enc.fit_transform(X_train, y_train)
    assert_allclose(X_fit_transform, expected_X_fit_transform)

    assert len(enc.encodings_) == 2
    for i in range(2):
        assert_allclose(enc.encodings_[i], expected_encodings[i])

    X_test_transform = enc.transform(X_test)
    assert_allclose(X_test_transform, expected_X_test_transform)
@pytest.mark.parametrize(
    "y, y_mean",
    [
        (np.array([3.4] * 20), 3.4),
        (np.array([0] * 20), 0),
        (np.array(["a"] * 20, dtype=object), 0),
    ],
    ids=["continuous", "binary", "binary-string"],
)
@pytest.mark.parametrize("smooth", ["auto", 4.0, 0.0])
def test_constant_target_and_feature(y, y_mean, smooth):
    """Check the edge case where both the feature and the target are constant."""
    X = np.array([[1] * 20]).T
    n_samples = X.shape[0]

    enc = TargetEncoder(cv=2, smooth=smooth, random_state=0)
    X_trans = enc.fit_transform(X, y)
    assert_allclose(X_trans, np.repeat([[y_mean]], n_samples, axis=0))
    assert enc.encodings_[0][0] == pytest.approx(y_mean)
    assert enc.target_mean_ == pytest.approx(y_mean)

    X_test = np.array([[1], [0]])
    X_test_trans = enc.transform(X_test)
    assert_allclose(X_test_trans, np.repeat([[y_mean]], 2, axis=0))
def test_fit_transform_not_associated_with_y_if_ordinal_categorical_is_not(
    global_random_seed,
):
    cardinality = 30  # not too large, otherwise we need a very large n_samples
    n_samples = 3000
    rng = np.random.RandomState(global_random_seed)
    y_train = rng.normal(size=n_samples)
    X_train = rng.randint(0, cardinality, size=n_samples).reshape(-1, 1)

    # Sort by y_train to attempt to cause a leak
    y_sorted_indices = y_train.argsort()
    y_train = y_train[y_sorted_indices]
    X_train = X_train[y_sorted_indices]

    target_encoder = TargetEncoder(shuffle=True, random_state=global_random_seed)
    X_encoded_train_shuffled = target_encoder.fit_transform(X_train, y_train)

    target_encoder = TargetEncoder(shuffle=False)
    X_encoded_train_no_shuffled = target_encoder.fit_transform(X_train, y_train)

    # Check that no information about y_train has leaked into X_train:
    regressor = RandomForestRegressor(
        n_estimators=10, min_samples_leaf=20, random_state=global_random_seed
    )

    # It's impossible to learn a good predictive model on the training set when
    # using the original representation X_train or the target encoded
    # representation with shuffled inner CV. For the latter, this checks that
    # no information about y_train has inadvertently leaked into the prior
    # used to generate `X_encoded_train_shuffled`:
    cv = ShuffleSplit(n_splits=50, random_state=global_random_seed)
    assert cross_val_score(regressor, X_train, y_train, cv=cv).mean() < 0.1
    assert (
        cross_val_score(regressor, X_encoded_train_shuffled, y_train, cv=cv).mean()
        < 0.1
    )

    # Without the inner CV shuffling, a lot of information about y_train goes
    # into the per-fold y_train.mean() priors: shrinkage is no longer effective
    # in this case and can no longer prevent downstream over-fitting.
    assert (
        cross_val_score(regressor, X_encoded_train_no_shuffled, y_train, cv=cv).mean()
        > 0.5
    )
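To make the leakage mechanism probed above concrete, a small sketch (illustrative, with an intentionally extreme one-row-per-category feature): full-data encodings memorize y, while cross-fitted ones do not. The unseen-in-fold fallback to the fold mean is the same behaviour test_smooth_zero below relies on.

import numpy as np
from sklearn.preprocessing import TargetEncoder

rng = np.random.RandomState(0)
y = rng.normal(size=1000)
X = np.arange(1000).reshape(-1, 1)  # every row is its own category

enc = TargetEncoder(smooth=0.0, random_state=0)
leaky = enc.fit(X, y).transform(X)  # per-category mean is y itself
honest = enc.fit_transform(X, y)    # out-of-fold: category unseen -> fold mean
print(np.corrcoef(leaky[:, 0], y)[0, 1])   # 1.0: perfect memorization
print(np.corrcoef(honest[:, 0], y)[0, 1])  # near 0: no leakage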
def test_smooth_zero():
    """Check the edge case where smoothing is zero and a CV fold does not
    contain a category."""
    X = np.array([[0, 0, 0, 0, 0, 1, 1, 1, 1, 1]]).T
    y = np.array([2.1, 4.3, 1.2, 3.1, 1.0, 9.0, 10.3, 14.2, 13.3, 15.0])

    enc = TargetEncoder(smooth=0.0, shuffle=False, cv=2)
    X_trans = enc.fit_transform(X, y)

    # With cv=2, category 0 does not exist in the second half, thus
    # it will be encoded as the mean of the second half
    assert_allclose(X_trans[0], np.mean(y[5:]))

    # Category 1 does not exist in the first half, thus it will be encoded as
    # the mean of the first half
    assert_allclose(X_trans[-1], np.mean(y[:5]))
@pytest.mark.parametrize("smooth", [0.0, 1e3, "auto"])
def test_invariance_of_encoding_under_label_permutation(smooth, global_random_seed):
    # Check that the encoding does not depend on the integer values of the
    # labels. This is quite a trivial property, but it is helpful for
    # understanding the following test.
    rng = np.random.RandomState(global_random_seed)

    # Random y and informative categorical X to make the test non-trivial when
    # using smoothing.
    y = rng.normal(size=1000)
    n_categories = 30
    X = KBinsDiscretizer(n_bins=n_categories, encode="ordinal").fit_transform(
        y.reshape(-1, 1)
    )

    X_train, X_test, y_train, y_test = train_test_split(
        X, y, random_state=global_random_seed
    )

    # Shuffle the labels to make sure that the encoding is invariant to the
    # permutation of the labels
    permutated_labels = rng.permutation(n_categories)
    X_train_permuted = permutated_labels[X_train.astype(np.int32)]
    X_test_permuted = permutated_labels[X_test.astype(np.int32)]

    target_encoder = TargetEncoder(smooth=smooth, random_state=global_random_seed)
    X_train_encoded = target_encoder.fit_transform(X_train, y_train)
    X_test_encoded = target_encoder.transform(X_test)

    X_train_permuted_encoded = target_encoder.fit_transform(X_train_permuted, y_train)
    X_test_permuted_encoded = target_encoder.transform(X_test_permuted)

    assert_allclose(X_train_encoded, X_train_permuted_encoded)
    assert_allclose(X_test_encoded, X_test_permuted_encoded)
# TODO(1.5) remove warning filter when kbd's subsample default is changed
@pytest.mark.filterwarnings("ignore:In version 1.5 onwards, subsample=200_000")
@pytest.mark.parametrize("smooth", [0.0, "auto"])
def test_target_encoding_for_linear_regression(smooth, global_random_seed):
    # Check some expected statistical properties when fitting a linear
    # regression model on target encoded features, depending on their relation
    # with the target.

    # In this test, we use the Ridge class with the "lsqr" solver and a little
    # bit of regularization to implement a linear regression model that
    # converges quickly for large `n_samples` and robustly in case of
    # correlated features. Since we will fit this model on a mean centered
    # target, we do not need to fit an intercept, which simplifies the
    # analysis of the expected coefficients.
    linear_regression = Ridge(alpha=1e-6, solver="lsqr", fit_intercept=False)

    # Construct a random target variable. We need a large number of samples for
    # this test to be stable across all values of the random seed.
    n_samples = 50_000
    rng = np.random.RandomState(global_random_seed)
    y = rng.randn(n_samples)

    # Generate a single informative ordinal feature with medium cardinality.
    # Inject some irreducible noise to make it harder for a multivariate model
    # to identify the informative feature among other pure noise features.
    noise = 0.8 * rng.randn(n_samples)
    n_categories = 100
    X_informative = KBinsDiscretizer(
        n_bins=n_categories,
        encode="ordinal",
        strategy="uniform",
        random_state=rng,
    ).fit_transform((y + noise).reshape(-1, 1))

    # Let's permute the labels to hide the fact that this feature is
    # informative from a naive linear regression model trained on the raw
    # ordinal values. As highlighted in the previous test, the target encoding
    # is invariant to such a permutation.
    permutated_labels = rng.permutation(n_categories)
    X_informative = permutated_labels[X_informative.astype(np.int32)]

    # Generate a shuffled copy of the informative feature to destroy the
    # relationship with the target.
    X_shuffled = rng.permutation(X_informative)

    # Also include a very high cardinality categorical feature that is by
    # itself independent of the target variable: target encoding such a
    # feature without internal cross-validation should cause catastrophic
    # overfitting for the downstream regressor, even with shrinkage. This kind
    # of feature typically represents near-unique identifiers of samples. In
    # general they should be removed from machine learning datasets, but here
    # we want to study the ability of the default behavior of TargetEncoder to
    # mitigate them automatically.
    X_near_unique_categories = rng.choice(
        int(0.9 * n_samples), size=n_samples, replace=True
    ).reshape(-1, 1)

    # Assemble the dataset and do a train-test split:
    X = np.concatenate(
        [X_informative, X_shuffled, X_near_unique_categories],
        axis=1,
    )
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

    # Let's first check that a linear regression model trained on the raw
    # features underfits because of the meaningless ordinal encoding of the
    # labels.
    raw_model = linear_regression.fit(X_train, y_train)
    assert raw_model.score(X_train, y_train) < 0.1
    assert raw_model.score(X_test, y_test) < 0.1

    # Now do the same with target encoding, using the internal CV mechanism
    # implemented when calling fit_transform.
    model_with_cv = make_pipeline(
        TargetEncoder(smooth=smooth, random_state=rng), linear_regression
    ).fit(X_train, y_train)

    # This model should be able to fit the data well and also generalise to the
    # test data (assuming that the binning is fine-grained enough). The R2
    # scores are not perfect because of the noise injected during the
    # generation of the unique informative feature.
    coef = model_with_cv[-1].coef_
    assert model_with_cv.score(X_train, y_train) > 0.5, coef
    assert model_with_cv.score(X_test, y_test) > 0.5, coef

    # The target encoder recovers the linear relationship with slope 1 between
    # the target encoded unique informative predictor and the target. Since the
    # target encoding of the 2 other features is not informative, thanks to the
    # use of internal cross-validation, the multivariate linear regressor
    # assigns a coef of 1 to the first feature and 0 to the other 2.
    assert coef[0] == pytest.approx(1, abs=1e-2)
    assert (np.abs(coef[1:]) < 0.2).all()

    # Let's now disable the internal cross-validation by calling fit and then
    # transform separately on the training set:
    target_encoder = TargetEncoder(smooth=smooth, random_state=rng).fit(
        X_train, y_train
    )
    X_enc_no_cv_train = target_encoder.transform(X_train)
    X_enc_no_cv_test = target_encoder.transform(X_test)
    model_no_cv = linear_regression.fit(X_enc_no_cv_train, y_train)

    # The linear regression model should always overfit because it assigns
    # too much weight to the extremely high cardinality feature relative to
    # the informative feature. Note that this is the case even when using
    # empirical Bayes smoothing, which is not enough to prevent such
    # overfitting on its own.
    coef = model_no_cv.coef_
    assert model_no_cv.score(X_enc_no_cv_train, y_train) > 0.7, coef
    assert model_no_cv.score(X_enc_no_cv_test, y_test) < 0.5, coef

    # The model overfits because it assigns too much weight to the high
    # cardinality yet non-informative feature instead of the lower
    # cardinality yet informative feature:
    assert abs(coef[0]) < abs(coef[2])
def test_pandas_copy_on_write():
    """
    Test the target-encoder Cython code when y is read-only.

    The numpy array underlying df["y"] is read-only when copy-on-write is
    enabled. Non-regression test for gh-27879.
    """
    pd = pytest.importorskip("pandas", minversion="2.0")
    with pd.option_context("mode.copy_on_write", True):
        df = pd.DataFrame({"x": ["a", "b", "b"], "y": [4.0, 5.0, 6.0]})
        TargetEncoder(target_type="continuous").fit(df[["x"]], df["y"])
env-llmeval/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (184 Bytes).

env-llmeval/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_arpack.cpython-310.pyc
ADDED
Binary file (656 Bytes).

env-llmeval/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_array_api.cpython-310.pyc
ADDED
Binary file (10.1 kB).

env-llmeval/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_arrayfuncs.cpython-310.pyc
ADDED
Binary file (1.32 kB).

env-llmeval/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_bunch.cpython-310.pyc
ADDED
Binary file (996 Bytes).

env-llmeval/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_class_weight.cpython-310.pyc
ADDED
Binary file (8.05 kB).

env-llmeval/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_cython_blas.cpython-310.pyc
ADDED
Binary file (5.98 kB).

env-llmeval/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_cython_templating.cpython-310.pyc
ADDED
Binary file (1.05 kB).

env-llmeval/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_deprecation.cpython-310.pyc
ADDED
Binary file (3.23 kB).

env-llmeval/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_encode.cpython-310.pyc
ADDED
Binary file (6.39 kB).

env-llmeval/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_estimator_checks.cpython-310.pyc
ADDED
Binary file (43.1 kB).

env-llmeval/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_estimator_html_repr.cpython-310.pyc
ADDED
Binary file (15.9 kB).

env-llmeval/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_extmath.cpython-310.pyc
ADDED
Binary file (25 kB).

env-llmeval/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_fast_dict.cpython-310.pyc
ADDED
Binary file (1.58 kB).

env-llmeval/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_fixes.cpython-310.pyc
ADDED
Binary file (3.49 kB).

env-llmeval/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_graph.cpython-310.pyc
ADDED
Binary file (2.38 kB).

env-llmeval/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_metaestimators.cpython-310.pyc
ADDED
Binary file (2.21 kB).

env-llmeval/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_mocking.cpython-310.pyc
ADDED
Binary file (5.55 kB).

env-llmeval/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_multiclass.cpython-310.pyc
ADDED
Binary file (13.9 kB).

env-llmeval/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_murmurhash.cpython-310.pyc
ADDED
Binary file (2.55 kB).

env-llmeval/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_optimize.cpython-310.pyc
ADDED
Binary file (1.49 kB).

env-llmeval/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_parallel.cpython-310.pyc
ADDED
Binary file (4.26 kB).

env-llmeval/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_param_validation.cpython-310.pyc
ADDED
Binary file (21.7 kB).

env-llmeval/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_plotting.cpython-310.pyc
ADDED
Binary file (1.68 kB).

env-llmeval/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_pprint.cpython-310.pyc
ADDED
Binary file (23.9 kB).

env-llmeval/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_random.cpython-310.pyc
ADDED
Binary file (4.59 kB).

env-llmeval/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_response.cpython-310.pyc
ADDED
Binary file (8.63 kB).

env-llmeval/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_seq_dataset.cpython-310.pyc
ADDED
Binary file (5.24 kB).

env-llmeval/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_set_output.cpython-310.pyc
ADDED
Binary file (16 kB).

env-llmeval/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_shortest_path.cpython-310.pyc
ADDED
Binary file (1.58 kB).

env-llmeval/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_show_versions.cpython-310.pyc
ADDED
Binary file (1.32 kB).

env-llmeval/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_sparsefuncs.cpython-310.pyc
ADDED
Binary file (21.6 kB).

env-llmeval/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_stats.cpython-310.pyc
ADDED
Binary file (3.09 kB).

env-llmeval/lib/python3.10/site-packages/sklearn/utils/tests/__pycache__/test_tags.cpython-310.pyc
ADDED
Binary file (1.54 kB).