applied-ai-018 committed
Commit f35a2c6 · verified · Parent: 77f4542

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. env-llmeval/lib/python3.10/site-packages/sklearn/__pycache__/__init__.cpython-310.pyc +0 -0
  2. env-llmeval/lib/python3.10/site-packages/sklearn/__pycache__/calibration.cpython-310.pyc +0 -0
  3. env-llmeval/lib/python3.10/site-packages/sklearn/__pycache__/discriminant_analysis.cpython-310.pyc +0 -0
  4. env-llmeval/lib/python3.10/site-packages/sklearn/__pycache__/dummy.cpython-310.pyc +0 -0
  5. env-llmeval/lib/python3.10/site-packages/sklearn/__pycache__/isotonic.cpython-310.pyc +0 -0
  6. env-llmeval/lib/python3.10/site-packages/sklearn/__pycache__/kernel_ridge.cpython-310.pyc +0 -0
  7. env-llmeval/lib/python3.10/site-packages/sklearn/__pycache__/multiclass.cpython-310.pyc +0 -0
  8. env-llmeval/lib/python3.10/site-packages/sklearn/__pycache__/multioutput.cpython-310.pyc +0 -0
  9. env-llmeval/lib/python3.10/site-packages/sklearn/__pycache__/pipeline.cpython-310.pyc +0 -0
  10. env-llmeval/lib/python3.10/site-packages/sklearn/__pycache__/random_projection.cpython-310.pyc +0 -0
  11. env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/__init__.py +88 -0
  12. env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/__init__.cpython-310.pyc +0 -0
  13. env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/_plot.cpython-310.pyc +0 -0
  14. env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/_search.cpython-310.pyc +0 -0
  15. env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/_search_successive_halving.cpython-310.pyc +0 -0
  16. env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/_split.cpython-310.pyc +0 -0
  17. env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/_validation.cpython-310.pyc +0 -0
  18. env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/_plot.py +907 -0
  19. env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/_search.py +1918 -0
  20. env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/_split.py +0 -0
  21. env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/_validation.py +2360 -0
  22. env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/tests/__init__.py +0 -0
  23. env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/tests/__pycache__/__init__.cpython-310.pyc +0 -0
  24. env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/tests/__pycache__/common.cpython-310.pyc +0 -0
  25. env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/tests/__pycache__/test_plot.cpython-310.pyc +0 -0
  26. env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/tests/__pycache__/test_search.cpython-310.pyc +0 -0
  27. env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/tests/__pycache__/test_split.cpython-310.pyc +0 -0
  28. env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/tests/__pycache__/test_successive_halving.cpython-310.pyc +0 -0
  29. env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/tests/__pycache__/test_validation.cpython-310.pyc +0 -0
  30. env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/tests/common.py +24 -0
  31. env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/tests/test_plot.py +595 -0
  32. env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/tests/test_search.py +2537 -0
  33. env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/tests/test_split.py +2025 -0
  34. env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/tests/test_successive_halving.py +848 -0
  35. env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/tests/test_validation.py +2621 -0
  36. env-llmeval/lib/python3.10/site-packages/sklearn/utils/__init__.py +1299 -0
  37. env-llmeval/lib/python3.10/site-packages/sklearn/utils/_arpack.py +30 -0
  38. env-llmeval/lib/python3.10/site-packages/sklearn/utils/_array_api.py +575 -0
  39. env-llmeval/lib/python3.10/site-packages/sklearn/utils/_available_if.py +93 -0
  40. env-llmeval/lib/python3.10/site-packages/sklearn/utils/_bunch.py +67 -0
  41. env-llmeval/lib/python3.10/site-packages/sklearn/utils/_cython_blas.pxd +41 -0
  42. env-llmeval/lib/python3.10/site-packages/sklearn/utils/_estimator_html_repr.css +404 -0
  43. env-llmeval/lib/python3.10/site-packages/sklearn/utils/_fast_dict.pxd +18 -0
  44. env-llmeval/lib/python3.10/site-packages/sklearn/utils/_heap.cpython-310-x86_64-linux-gnu.so +0 -0
  45. env-llmeval/lib/python3.10/site-packages/sklearn/utils/_heap.pxd +14 -0
  46. env-llmeval/lib/python3.10/site-packages/sklearn/utils/_mask.py +63 -0
  47. env-llmeval/lib/python3.10/site-packages/sklearn/utils/_metadata_requests.py +1563 -0
  48. env-llmeval/lib/python3.10/site-packages/sklearn/utils/_openmp_helpers.cpython-310-x86_64-linux-gnu.so +0 -0
  49. env-llmeval/lib/python3.10/site-packages/sklearn/utils/_openmp_helpers.pxd +33 -0
  50. env-llmeval/lib/python3.10/site-packages/sklearn/utils/_param_validation.py +905 -0
env-llmeval/lib/python3.10/site-packages/sklearn/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2.06 kB)

env-llmeval/lib/python3.10/site-packages/sklearn/__pycache__/calibration.cpython-310.pyc ADDED
Binary file (40.9 kB)

env-llmeval/lib/python3.10/site-packages/sklearn/__pycache__/discriminant_analysis.cpython-310.pyc ADDED
Binary file (32.3 kB)

env-llmeval/lib/python3.10/site-packages/sklearn/__pycache__/dummy.cpython-310.pyc ADDED
Binary file (20.2 kB)

env-llmeval/lib/python3.10/site-packages/sklearn/__pycache__/isotonic.cpython-310.pyc ADDED
Binary file (14.4 kB)

env-llmeval/lib/python3.10/site-packages/sklearn/__pycache__/kernel_ridge.cpython-310.pyc ADDED
Binary file (9.04 kB)

env-llmeval/lib/python3.10/site-packages/sklearn/__pycache__/multiclass.cpython-310.pyc ADDED
Binary file (38.9 kB)

env-llmeval/lib/python3.10/site-packages/sklearn/__pycache__/multioutput.cpython-310.pyc ADDED
Binary file (37.4 kB)

env-llmeval/lib/python3.10/site-packages/sklearn/__pycache__/pipeline.cpython-310.pyc ADDED
Binary file (58.6 kB)

env-llmeval/lib/python3.10/site-packages/sklearn/__pycache__/random_projection.cpython-310.pyc ADDED
Binary file (26 kB)
 
env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/__init__.py ADDED
@@ -0,0 +1,88 @@
+import typing
+
+from ._plot import LearningCurveDisplay, ValidationCurveDisplay
+from ._search import GridSearchCV, ParameterGrid, ParameterSampler, RandomizedSearchCV
+from ._split import (
+    BaseCrossValidator,
+    BaseShuffleSplit,
+    GroupKFold,
+    GroupShuffleSplit,
+    KFold,
+    LeaveOneGroupOut,
+    LeaveOneOut,
+    LeavePGroupsOut,
+    LeavePOut,
+    PredefinedSplit,
+    RepeatedKFold,
+    RepeatedStratifiedKFold,
+    ShuffleSplit,
+    StratifiedGroupKFold,
+    StratifiedKFold,
+    StratifiedShuffleSplit,
+    TimeSeriesSplit,
+    check_cv,
+    train_test_split,
+)
+from ._validation import (
+    cross_val_predict,
+    cross_val_score,
+    cross_validate,
+    learning_curve,
+    permutation_test_score,
+    validation_curve,
+)
+
+if typing.TYPE_CHECKING:
+    # Avoid errors in type checkers (e.g. mypy) for experimental estimators.
+    # TODO: remove this check once the estimator is no longer experimental.
+    from ._search_successive_halving import (  # noqa
+        HalvingGridSearchCV,
+        HalvingRandomSearchCV,
+    )
+
+
+__all__ = [
+    "BaseCrossValidator",
+    "BaseShuffleSplit",
+    "GridSearchCV",
+    "TimeSeriesSplit",
+    "KFold",
+    "GroupKFold",
+    "GroupShuffleSplit",
+    "LeaveOneGroupOut",
+    "LeaveOneOut",
+    "LeavePGroupsOut",
+    "LeavePOut",
+    "RepeatedKFold",
+    "RepeatedStratifiedKFold",
+    "ParameterGrid",
+    "ParameterSampler",
+    "PredefinedSplit",
+    "RandomizedSearchCV",
+    "ShuffleSplit",
+    "StratifiedKFold",
+    "StratifiedGroupKFold",
+    "StratifiedShuffleSplit",
+    "check_cv",
+    "cross_val_predict",
+    "cross_val_score",
+    "cross_validate",
+    "learning_curve",
+    "LearningCurveDisplay",
+    "permutation_test_score",
+    "train_test_split",
+    "validation_curve",
+    "ValidationCurveDisplay",
+]
+
+
+# TODO: remove this check once the estimator is no longer experimental.
+def __getattr__(name):
+    if name in {"HalvingGridSearchCV", "HalvingRandomSearchCV"}:
+        raise ImportError(
+            f"{name} is experimental and the API might change without any "
+            "deprecation cycle. To use it, you need to explicitly import "
+            "enable_halving_search_cv:\n"
+            "from sklearn.experimental import enable_halving_search_cv"
+        )
+    raise AttributeError(f"module {__name__} has no attribute {name}")
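
Note: the module-level __getattr__ above uses PEP 562 lazy attribute access to gate the experimental halving estimators. A minimal sketch of the resulting behavior (not part of the commit; assumes scikit-learn is installed):

try:
    # Without the enabling shim, module __getattr__ raises ImportError.
    from sklearn.model_selection import HalvingGridSearchCV  # noqa: F401
except ImportError as exc:
    print(exc)  # points the user at enable_halving_search_cv

# Importing the shim first registers the experimental estimators,
# after which the same import succeeds.
from sklearn.experimental import enable_halving_search_cv  # noqa: F401
from sklearn.model_selection import HalvingGridSearchCV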
env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.85 kB)

env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/_plot.cpython-310.pyc ADDED
Binary file (30.9 kB)

env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/_search.cpython-310.pyc ADDED
Binary file (64.7 kB)

env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/_search_successive_halving.cpython-310.pyc ADDED
Binary file (37.7 kB)

env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/_split.cpython-310.pyc ADDED
Binary file (84.4 kB)

env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/_validation.cpython-310.pyc ADDED
Binary file (69.4 kB)
 
env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/_plot.py ADDED
@@ -0,0 +1,907 @@
+import warnings
+
+import numpy as np
+
+from ..utils import check_matplotlib_support
+from ..utils._plotting import _interval_max_min_ratio, _validate_score_name
+from ._validation import learning_curve, validation_curve
+
+
+class _BaseCurveDisplay:
+    def _plot_curve(
+        self,
+        x_data,
+        *,
+        ax=None,
+        negate_score=False,
+        score_name=None,
+        score_type="test",
+        log_scale="deprecated",
+        std_display_style="fill_between",
+        line_kw=None,
+        fill_between_kw=None,
+        errorbar_kw=None,
+    ):
+        check_matplotlib_support(f"{self.__class__.__name__}.plot")
+
+        import matplotlib.pyplot as plt
+
+        if ax is None:
+            _, ax = plt.subplots()
+
+        if negate_score:
+            train_scores, test_scores = -self.train_scores, -self.test_scores
+        else:
+            train_scores, test_scores = self.train_scores, self.test_scores
+
+        if std_display_style not in ("errorbar", "fill_between", None):
+            raise ValueError(
+                f"Unknown std_display_style: {std_display_style}. Should be one of"
+                " 'errorbar', 'fill_between', or None."
+            )
+
+        if score_type not in ("test", "train", "both"):
+            raise ValueError(
+                f"Unknown score_type: {score_type}. Should be one of 'test', "
+                "'train', or 'both'."
+            )
+
+        if score_type == "train":
+            scores = {"Train": train_scores}
+        elif score_type == "test":
+            scores = {"Test": test_scores}
+        else:  # score_type == "both"
+            scores = {"Train": train_scores, "Test": test_scores}
+
+        if std_display_style in ("fill_between", None):
+            # plot the mean score
+            if line_kw is None:
+                line_kw = {}
+
+            self.lines_ = []
+            for line_label, score in scores.items():
+                self.lines_.append(
+                    *ax.plot(
+                        x_data,
+                        score.mean(axis=1),
+                        label=line_label,
+                        **line_kw,
+                    )
+                )
+            self.errorbar_ = None
+            self.fill_between_ = None  # overwritten below by fill_between
+
+        if std_display_style == "errorbar":
+            if errorbar_kw is None:
+                errorbar_kw = {}
+
+            self.errorbar_ = []
+            for line_label, score in scores.items():
+                self.errorbar_.append(
+                    ax.errorbar(
+                        x_data,
+                        score.mean(axis=1),
+                        score.std(axis=1),
+                        label=line_label,
+                        **errorbar_kw,
+                    )
+                )
+            self.lines_, self.fill_between_ = None, None
+        elif std_display_style == "fill_between":
+            if fill_between_kw is None:
+                fill_between_kw = {}
+            default_fill_between_kw = {"alpha": 0.5}
+            fill_between_kw = {**default_fill_between_kw, **fill_between_kw}
+
+            self.fill_between_ = []
+            for line_label, score in scores.items():
+                self.fill_between_.append(
+                    ax.fill_between(
+                        x_data,
+                        score.mean(axis=1) - score.std(axis=1),
+                        score.mean(axis=1) + score.std(axis=1),
+                        **fill_between_kw,
+                    )
+                )
+
+        score_name = self.score_name if score_name is None else score_name
+
+        ax.legend()
+
+        # TODO(1.5): to be removed
+        if log_scale != "deprecated":
+            warnings.warn(
+                (
+                    "The `log_scale` parameter is deprecated as of version 1.3 "
+                    "and will be removed in 1.5. You can use display.ax_.set_xscale "
+                    "and display.ax_.set_yscale instead."
+                ),
+                FutureWarning,
+            )
+            xscale = "log" if log_scale else "linear"
+        else:
+            # We found that a ratio, smaller or bigger than 5, between the largest and
+            # smallest gap of the x values is a good indicator to choose between linear
+            # and log scale.
+            if _interval_max_min_ratio(x_data) > 5:
+                xscale = "symlog" if x_data.min() <= 0 else "log"
+            else:
+                xscale = "linear"
+        ax.set_xscale(xscale)
+        ax.set_ylabel(f"{score_name}")
+
+        self.ax_ = ax
+        self.figure_ = ax.figure
+
+
+class LearningCurveDisplay(_BaseCurveDisplay):
+    """Learning Curve visualization.
+
+    It is recommended to use
+    :meth:`~sklearn.model_selection.LearningCurveDisplay.from_estimator` to
+    create a :class:`~sklearn.model_selection.LearningCurveDisplay` instance.
+    All parameters are stored as attributes.
+
+    Read more in the :ref:`User Guide <visualizations>` for general information
+    about the visualization API and
+    :ref:`detailed documentation <learning_curve>` regarding the learning
+    curve visualization.
+
+    .. versionadded:: 1.2
+
+    Parameters
+    ----------
+    train_sizes : ndarray of shape (n_unique_ticks,)
+        Numbers of training examples that have been used to generate the
+        learning curve.
+
+    train_scores : ndarray of shape (n_ticks, n_cv_folds)
+        Scores on training sets.
+
+    test_scores : ndarray of shape (n_ticks, n_cv_folds)
+        Scores on test set.
+
+    score_name : str, default=None
+        The name of the score used in `learning_curve`. It will override the name
+        inferred from the `scoring` parameter. If `score` is `None`, we use `"Score"` if
+        `negate_score` is `False` and `"Negative score"` otherwise. If `scoring` is a
+        string or a callable, we infer the name. We replace `_` by spaces and capitalize
+        the first letter. We remove `neg_` and replace it by `"Negative"` if
+        `negate_score` is `False` or just remove it otherwise.
+
+    Attributes
+    ----------
+    ax_ : matplotlib Axes
+        Axes with the learning curve.
+
+    figure_ : matplotlib Figure
+        Figure containing the learning curve.
+
+    errorbar_ : list of matplotlib Artist or None
+        When the `std_display_style` is `"errorbar"`, this is a list of
+        `matplotlib.container.ErrorbarContainer` objects. If another style is
+        used, `errorbar_` is `None`.
+
+    lines_ : list of matplotlib Artist or None
+        When the `std_display_style` is `"fill_between"`, this is a list of
+        `matplotlib.lines.Line2D` objects corresponding to the mean train and
+        test scores. If another style is used, `lines_` is `None`.
+
+    fill_between_ : list of matplotlib Artist or None
+        When the `std_display_style` is `"fill_between"`, this is a list of
+        `matplotlib.collections.PolyCollection` objects. If another style is
+        used, `fill_between_` is `None`.
+
+    See Also
+    --------
+    sklearn.model_selection.learning_curve : Compute the learning curve.
+
+    Examples
+    --------
+    >>> import matplotlib.pyplot as plt
+    >>> from sklearn.datasets import load_iris
+    >>> from sklearn.model_selection import LearningCurveDisplay, learning_curve
+    >>> from sklearn.tree import DecisionTreeClassifier
+    >>> X, y = load_iris(return_X_y=True)
+    >>> tree = DecisionTreeClassifier(random_state=0)
+    >>> train_sizes, train_scores, test_scores = learning_curve(
+    ...     tree, X, y)
+    >>> display = LearningCurveDisplay(train_sizes=train_sizes,
+    ...     train_scores=train_scores, test_scores=test_scores, score_name="Score")
+    >>> display.plot()
+    <...>
+    >>> plt.show()
+    """
+
+    def __init__(self, *, train_sizes, train_scores, test_scores, score_name=None):
+        self.train_sizes = train_sizes
+        self.train_scores = train_scores
+        self.test_scores = test_scores
+        self.score_name = score_name
+
+    def plot(
+        self,
+        ax=None,
+        *,
+        negate_score=False,
+        score_name=None,
+        score_type="both",
+        log_scale="deprecated",
+        std_display_style="fill_between",
+        line_kw=None,
+        fill_between_kw=None,
+        errorbar_kw=None,
+    ):
+        """Plot visualization.
+
+        Parameters
+        ----------
+        ax : matplotlib Axes, default=None
+            Axes object to plot on. If `None`, a new figure and axes is
+            created.
+
+        negate_score : bool, default=False
+            Whether or not to negate the scores obtained through
+            :func:`~sklearn.model_selection.learning_curve`. This is
+            particularly useful when using the error denoted by `neg_*` in
+            `scikit-learn`.
+
+        score_name : str, default=None
+            The name of the score used to decorate the y-axis of the plot. It will
+            override the name inferred from the `scoring` parameter. If `score` is
+            `None`, we use `"Score"` if `negate_score` is `False` and `"Negative score"`
+            otherwise. If `scoring` is a string or a callable, we infer the name. We
+            replace `_` by spaces and capitalize the first letter. We remove `neg_` and
+            replace it by `"Negative"` if `negate_score` is
+            `False` or just remove it otherwise.
+
+        score_type : {"test", "train", "both"}, default="both"
+            The type of score to plot. Can be one of `"test"`, `"train"`, or
+            `"both"`.
+
+        log_scale : bool, default="deprecated"
+            Whether or not to use a logarithmic scale for the x-axis.
+
+            .. deprecated:: 1.3
+                `log_scale` is deprecated in 1.3 and will be removed in 1.5.
+                Use `display.ax_.set_xscale` and `display.ax_.set_yscale` instead.
+
+        std_display_style : {"errorbar", "fill_between"} or None, default="fill_between"
+            The style used to display the score standard deviation around the
+            mean score. If None, no standard deviation representation is
+            displayed.
+
+        line_kw : dict, default=None
+            Additional keyword arguments passed to the `plt.plot` used to draw
+            the mean score.
+
+        fill_between_kw : dict, default=None
+            Additional keyword arguments passed to the `plt.fill_between` used
+            to draw the score standard deviation.
+
+        errorbar_kw : dict, default=None
+            Additional keyword arguments passed to the `plt.errorbar` used to
+            draw mean score and standard deviation score.
+
+        Returns
+        -------
+        display : :class:`~sklearn.model_selection.LearningCurveDisplay`
+            Object that stores computed values.
+        """
+        self._plot_curve(
+            self.train_sizes,
+            ax=ax,
+            negate_score=negate_score,
+            score_name=score_name,
+            score_type=score_type,
+            log_scale=log_scale,
+            std_display_style=std_display_style,
+            line_kw=line_kw,
+            fill_between_kw=fill_between_kw,
+            errorbar_kw=errorbar_kw,
+        )
+        self.ax_.set_xlabel("Number of samples in the training set")
+        return self
+
+    @classmethod
+    def from_estimator(
+        cls,
+        estimator,
+        X,
+        y,
+        *,
+        groups=None,
+        train_sizes=np.linspace(0.1, 1.0, 5),
+        cv=None,
+        scoring=None,
+        exploit_incremental_learning=False,
+        n_jobs=None,
+        pre_dispatch="all",
+        verbose=0,
+        shuffle=False,
+        random_state=None,
+        error_score=np.nan,
+        fit_params=None,
+        ax=None,
+        negate_score=False,
+        score_name=None,
+        score_type="both",
+        log_scale="deprecated",
+        std_display_style="fill_between",
+        line_kw=None,
+        fill_between_kw=None,
+        errorbar_kw=None,
+    ):
+        """Create a learning curve display from an estimator.
+
+        Read more in the :ref:`User Guide <visualizations>` for general
+        information about the visualization API and :ref:`detailed
+        documentation <learning_curve>` regarding the learning curve
+        visualization.
+
+        Parameters
+        ----------
+        estimator : object type that implements the "fit" and "predict" methods
+            An object of that type which is cloned for each validation.
+
+        X : array-like of shape (n_samples, n_features)
+            Training data, where `n_samples` is the number of samples and
+            `n_features` is the number of features.
+
+        y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None
+            Target relative to X for classification or regression;
+            None for unsupervised learning.
+
+        groups : array-like of shape (n_samples,), default=None
+            Group labels for the samples used while splitting the dataset into
+            train/test set. Only used in conjunction with a "Group" :term:`cv`
+            instance (e.g., :class:`GroupKFold`).
+
+        train_sizes : array-like of shape (n_ticks,), \
+                default=np.linspace(0.1, 1.0, 5)
+            Relative or absolute numbers of training examples that will be used
+            to generate the learning curve. If the dtype is float, it is
+            regarded as a fraction of the maximum size of the training set
+            (that is determined by the selected validation method), i.e. it has
+            to be within (0, 1]. Otherwise it is interpreted as absolute sizes
+            of the training sets. Note that for classification the number of
+            samples usually has to be big enough to contain at least one
+            sample from each class.
+
+        cv : int, cross-validation generator or an iterable, default=None
+            Determines the cross-validation splitting strategy.
+            Possible inputs for cv are:
+
+            - None, to use the default 5-fold cross validation,
+            - int, to specify the number of folds in a `(Stratified)KFold`,
+            - :term:`CV splitter`,
+            - An iterable yielding (train, test) splits as arrays of indices.
+
+            For int/None inputs, if the estimator is a classifier and `y` is
+            either binary or multiclass,
+            :class:`~sklearn.model_selection.StratifiedKFold` is used. In all
+            other cases, :class:`~sklearn.model_selection.KFold` is used. These
+            splitters are instantiated with `shuffle=False` so the splits will
+            be the same across calls.
+
+            Refer to the :ref:`User Guide <cross_validation>` for the various
+            cross-validation strategies that can be used here.
+
+        scoring : str or callable, default=None
+            A string (see :ref:`scoring_parameter`) or
+            a scorer callable object / function with signature
+            `scorer(estimator, X, y)` (see :ref:`scoring`).
+
+        exploit_incremental_learning : bool, default=False
+            If the estimator supports incremental learning, this will be
+            used to speed up fitting for different training set sizes.
+
+        n_jobs : int, default=None
+            Number of jobs to run in parallel. Training the estimator and
+            computing the score are parallelized over the different training
+            and test sets. `None` means 1 unless in a
+            :obj:`joblib.parallel_backend` context. `-1` means using all
+            processors. See :term:`Glossary <n_jobs>` for more details.
+
+        pre_dispatch : int or str, default='all'
+            Number of predispatched jobs for parallel execution (default is
+            all). The option can reduce the allocated memory. The str can
+            be an expression like '2*n_jobs'.
+
+        verbose : int, default=0
+            Controls the verbosity: the higher, the more messages.
+
+        shuffle : bool, default=False
+            Whether to shuffle training data before taking prefixes of it
+            based on `train_sizes`.
+
+        random_state : int, RandomState instance or None, default=None
+            Used when `shuffle` is True. Pass an int for reproducible
+            output across multiple function calls.
+            See :term:`Glossary <random_state>`.
+
+        error_score : 'raise' or numeric, default=np.nan
+            Value to assign to the score if an error occurs in estimator
+            fitting. If set to 'raise', the error is raised. If a numeric value
+            is given, FitFailedWarning is raised.
+
+        fit_params : dict, default=None
+            Parameters to pass to the fit method of the estimator.
+
+        ax : matplotlib Axes, default=None
+            Axes object to plot on. If `None`, a new figure and axes is
+            created.
+
+        negate_score : bool, default=False
+            Whether or not to negate the scores obtained through
+            :func:`~sklearn.model_selection.learning_curve`. This is
+            particularly useful when using the error denoted by `neg_*` in
+            `scikit-learn`.
+
+        score_name : str, default=None
+            The name of the score used to decorate the y-axis of the plot. It will
+            override the name inferred from the `scoring` parameter. If `score` is
+            `None`, we use `"Score"` if `negate_score` is `False` and `"Negative score"`
+            otherwise. If `scoring` is a string or a callable, we infer the name. We
+            replace `_` by spaces and capitalize the first letter. We remove `neg_` and
+            replace it by `"Negative"` if `negate_score` is
+            `False` or just remove it otherwise.
+
+        score_type : {"test", "train", "both"}, default="both"
+            The type of score to plot. Can be one of `"test"`, `"train"`, or
+            `"both"`.
+
+        log_scale : bool, default="deprecated"
+            Whether or not to use a logarithmic scale for the x-axis.
+
+            .. deprecated:: 1.3
+                `log_scale` is deprecated in 1.3 and will be removed in 1.5.
+                Use `display.ax_.set_xscale` and `display.ax_.set_yscale` instead.
+
+        std_display_style : {"errorbar", "fill_between"} or None, default="fill_between"
+            The style used to display the score standard deviation around the
+            mean score. If `None`, no representation of the standard deviation
+            is displayed.
+
+        line_kw : dict, default=None
+            Additional keyword arguments passed to the `plt.plot` used to draw
+            the mean score.
+
+        fill_between_kw : dict, default=None
+            Additional keyword arguments passed to the `plt.fill_between` used
+            to draw the score standard deviation.
+
+        errorbar_kw : dict, default=None
+            Additional keyword arguments passed to the `plt.errorbar` used to
+            draw mean score and standard deviation score.
+
+        Returns
+        -------
+        display : :class:`~sklearn.model_selection.LearningCurveDisplay`
+            Object that stores computed values.
+
+        Examples
+        --------
+        >>> import matplotlib.pyplot as plt
+        >>> from sklearn.datasets import load_iris
+        >>> from sklearn.model_selection import LearningCurveDisplay
+        >>> from sklearn.tree import DecisionTreeClassifier
+        >>> X, y = load_iris(return_X_y=True)
+        >>> tree = DecisionTreeClassifier(random_state=0)
+        >>> LearningCurveDisplay.from_estimator(tree, X, y)
+        <...>
+        >>> plt.show()
+        """
+        check_matplotlib_support(f"{cls.__name__}.from_estimator")
+
+        score_name = _validate_score_name(score_name, scoring, negate_score)
+
+        train_sizes, train_scores, test_scores = learning_curve(
+            estimator,
+            X,
+            y,
+            groups=groups,
+            train_sizes=train_sizes,
+            cv=cv,
+            scoring=scoring,
+            exploit_incremental_learning=exploit_incremental_learning,
+            n_jobs=n_jobs,
+            pre_dispatch=pre_dispatch,
+            verbose=verbose,
+            shuffle=shuffle,
+            random_state=random_state,
+            error_score=error_score,
+            return_times=False,
+            fit_params=fit_params,
+        )
+
+        viz = cls(
+            train_sizes=train_sizes,
+            train_scores=train_scores,
+            test_scores=test_scores,
+            score_name=score_name,
+        )
+        return viz.plot(
+            ax=ax,
+            negate_score=negate_score,
+            score_type=score_type,
+            log_scale=log_scale,
+            std_display_style=std_display_style,
+            line_kw=line_kw,
+            fill_between_kw=fill_between_kw,
+            errorbar_kw=errorbar_kw,
+        )
+
+
+class ValidationCurveDisplay(_BaseCurveDisplay):
+    """Validation Curve visualization.
+
+    It is recommended to use
+    :meth:`~sklearn.model_selection.ValidationCurveDisplay.from_estimator` to
+    create a :class:`~sklearn.model_selection.ValidationCurveDisplay` instance.
+    All parameters are stored as attributes.
+
+    Read more in the :ref:`User Guide <visualizations>` for general information
+    about the visualization API and :ref:`detailed documentation
+    <validation_curve>` regarding the validation curve visualization.
+
+    .. versionadded:: 1.3
+
+    Parameters
+    ----------
+    param_name : str
+        Name of the parameter that has been varied.
+
+    param_range : array-like of shape (n_ticks,)
+        The values of the parameter that have been evaluated.
+
+    train_scores : ndarray of shape (n_ticks, n_cv_folds)
+        Scores on training sets.
+
+    test_scores : ndarray of shape (n_ticks, n_cv_folds)
+        Scores on test set.
+
+    score_name : str, default=None
+        The name of the score used in `validation_curve`. It will override the name
+        inferred from the `scoring` parameter. If `score` is `None`, we use `"Score"` if
+        `negate_score` is `False` and `"Negative score"` otherwise. If `scoring` is a
+        string or a callable, we infer the name. We replace `_` by spaces and capitalize
+        the first letter. We remove `neg_` and replace it by `"Negative"` if
+        `negate_score` is `False` or just remove it otherwise.
+
+    Attributes
+    ----------
+    ax_ : matplotlib Axes
+        Axes with the validation curve.
+
+    figure_ : matplotlib Figure
+        Figure containing the validation curve.
+
+    errorbar_ : list of matplotlib Artist or None
+        When the `std_display_style` is `"errorbar"`, this is a list of
+        `matplotlib.container.ErrorbarContainer` objects. If another style is
+        used, `errorbar_` is `None`.
+
+    lines_ : list of matplotlib Artist or None
+        When the `std_display_style` is `"fill_between"`, this is a list of
+        `matplotlib.lines.Line2D` objects corresponding to the mean train and
+        test scores. If another style is used, `lines_` is `None`.
+
+    fill_between_ : list of matplotlib Artist or None
+        When the `std_display_style` is `"fill_between"`, this is a list of
+        `matplotlib.collections.PolyCollection` objects. If another style is
+        used, `fill_between_` is `None`.
+
+    See Also
+    --------
+    sklearn.model_selection.validation_curve : Compute the validation curve.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> import matplotlib.pyplot as plt
+    >>> from sklearn.datasets import make_classification
+    >>> from sklearn.model_selection import ValidationCurveDisplay, validation_curve
+    >>> from sklearn.linear_model import LogisticRegression
+    >>> X, y = make_classification(n_samples=1_000, random_state=0)
+    >>> logistic_regression = LogisticRegression()
+    >>> param_name, param_range = "C", np.logspace(-8, 3, 10)
+    >>> train_scores, test_scores = validation_curve(
+    ...     logistic_regression, X, y, param_name=param_name, param_range=param_range
+    ... )
+    >>> display = ValidationCurveDisplay(
+    ...     param_name=param_name, param_range=param_range,
+    ...     train_scores=train_scores, test_scores=test_scores, score_name="Score"
+    ... )
+    >>> display.plot()
+    <...>
+    >>> plt.show()
+    """
+
+    def __init__(
+        self, *, param_name, param_range, train_scores, test_scores, score_name=None
+    ):
+        self.param_name = param_name
+        self.param_range = param_range
+        self.train_scores = train_scores
+        self.test_scores = test_scores
+        self.score_name = score_name
+
+    def plot(
+        self,
+        ax=None,
+        *,
+        negate_score=False,
+        score_name=None,
+        score_type="both",
+        std_display_style="fill_between",
+        line_kw=None,
+        fill_between_kw=None,
+        errorbar_kw=None,
+    ):
+        """Plot visualization.
+
+        Parameters
+        ----------
+        ax : matplotlib Axes, default=None
+            Axes object to plot on. If `None`, a new figure and axes is
+            created.
+
+        negate_score : bool, default=False
+            Whether or not to negate the scores obtained through
+            :func:`~sklearn.model_selection.validation_curve`. This is
+            particularly useful when using the error denoted by `neg_*` in
+            `scikit-learn`.
+
+        score_name : str, default=None
+            The name of the score used to decorate the y-axis of the plot. It will
+            override the name inferred from the `scoring` parameter. If `score` is
+            `None`, we use `"Score"` if `negate_score` is `False` and `"Negative score"`
+            otherwise. If `scoring` is a string or a callable, we infer the name. We
+            replace `_` by spaces and capitalize the first letter. We remove `neg_` and
+            replace it by `"Negative"` if `negate_score` is
+            `False` or just remove it otherwise.
+
+        score_type : {"test", "train", "both"}, default="both"
+            The type of score to plot. Can be one of `"test"`, `"train"`, or
+            `"both"`.
+
+        std_display_style : {"errorbar", "fill_between"} or None, default="fill_between"
+            The style used to display the score standard deviation around the
+            mean score. If None, no standard deviation representation is
+            displayed.
+
+        line_kw : dict, default=None
+            Additional keyword arguments passed to the `plt.plot` used to draw
+            the mean score.
+
+        fill_between_kw : dict, default=None
+            Additional keyword arguments passed to the `plt.fill_between` used
+            to draw the score standard deviation.
+
+        errorbar_kw : dict, default=None
+            Additional keyword arguments passed to the `plt.errorbar` used to
+            draw mean score and standard deviation score.
+
+        Returns
+        -------
+        display : :class:`~sklearn.model_selection.ValidationCurveDisplay`
+            Object that stores computed values.
+        """
+        self._plot_curve(
+            self.param_range,
+            ax=ax,
+            negate_score=negate_score,
+            score_name=score_name,
+            score_type=score_type,
+            log_scale="deprecated",
+            std_display_style=std_display_style,
+            line_kw=line_kw,
+            fill_between_kw=fill_between_kw,
+            errorbar_kw=errorbar_kw,
+        )
+        self.ax_.set_xlabel(f"{self.param_name}")
+        return self
+
+    @classmethod
+    def from_estimator(
+        cls,
+        estimator,
+        X,
+        y,
+        *,
+        param_name,
+        param_range,
+        groups=None,
+        cv=None,
+        scoring=None,
+        n_jobs=None,
+        pre_dispatch="all",
+        verbose=0,
+        error_score=np.nan,
+        fit_params=None,
+        ax=None,
+        negate_score=False,
+        score_name=None,
+        score_type="both",
+        std_display_style="fill_between",
+        line_kw=None,
+        fill_between_kw=None,
+        errorbar_kw=None,
+    ):
+        """Create a validation curve display from an estimator.
+
+        Read more in the :ref:`User Guide <visualizations>` for general
+        information about the visualization API and :ref:`detailed
+        documentation <validation_curve>` regarding the validation curve
+        visualization.
+
+        Parameters
+        ----------
+        estimator : object type that implements the "fit" and "predict" methods
+            An object of that type which is cloned for each validation.
+
+        X : array-like of shape (n_samples, n_features)
+            Training data, where `n_samples` is the number of samples and
+            `n_features` is the number of features.
+
+        y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None
+            Target relative to X for classification or regression;
+            None for unsupervised learning.
+
+        param_name : str
+            Name of the parameter that will be varied.
+
+        param_range : array-like of shape (n_values,)
+            The values of the parameter that will be evaluated.
+
+        groups : array-like of shape (n_samples,), default=None
+            Group labels for the samples used while splitting the dataset into
+            train/test set. Only used in conjunction with a "Group" :term:`cv`
+            instance (e.g., :class:`GroupKFold`).
+
+        cv : int, cross-validation generator or an iterable, default=None
+            Determines the cross-validation splitting strategy.
+            Possible inputs for cv are:
+
+            - None, to use the default 5-fold cross validation,
+            - int, to specify the number of folds in a `(Stratified)KFold`,
+            - :term:`CV splitter`,
+            - An iterable yielding (train, test) splits as arrays of indices.
+
+            For int/None inputs, if the estimator is a classifier and `y` is
+            either binary or multiclass,
+            :class:`~sklearn.model_selection.StratifiedKFold` is used. In all
+            other cases, :class:`~sklearn.model_selection.KFold` is used. These
+            splitters are instantiated with `shuffle=False` so the splits will
+            be the same across calls.
+
+            Refer to the :ref:`User Guide <cross_validation>` for the various
+            cross-validation strategies that can be used here.
+
+        scoring : str or callable, default=None
+            A string (see :ref:`scoring_parameter`) or
+            a scorer callable object / function with signature
+            `scorer(estimator, X, y)` (see :ref:`scoring`).
+
+        n_jobs : int, default=None
+            Number of jobs to run in parallel. Training the estimator and
+            computing the score are parallelized over the different training
+            and test sets. `None` means 1 unless in a
+            :obj:`joblib.parallel_backend` context. `-1` means using all
+            processors. See :term:`Glossary <n_jobs>` for more details.
+
+        pre_dispatch : int or str, default='all'
+            Number of predispatched jobs for parallel execution (default is
+            all). The option can reduce the allocated memory. The str can
+            be an expression like '2*n_jobs'.
+
+        verbose : int, default=0
+            Controls the verbosity: the higher, the more messages.
+
+        error_score : 'raise' or numeric, default=np.nan
+            Value to assign to the score if an error occurs in estimator
+            fitting. If set to 'raise', the error is raised. If a numeric value
+            is given, FitFailedWarning is raised.
+
+        fit_params : dict, default=None
+            Parameters to pass to the fit method of the estimator.
+
+        ax : matplotlib Axes, default=None
+            Axes object to plot on. If `None`, a new figure and axes is
+            created.
+
+        negate_score : bool, default=False
+            Whether or not to negate the scores obtained through
+            :func:`~sklearn.model_selection.validation_curve`. This is
+            particularly useful when using the error denoted by `neg_*` in
+            `scikit-learn`.
+
+        score_name : str, default=None
+            The name of the score used to decorate the y-axis of the plot. It will
+            override the name inferred from the `scoring` parameter. If `score` is
+            `None`, we use `"Score"` if `negate_score` is `False` and `"Negative score"`
+            otherwise. If `scoring` is a string or a callable, we infer the name. We
+            replace `_` by spaces and capitalize the first letter. We remove `neg_` and
+            replace it by `"Negative"` if `negate_score` is
+            `False` or just remove it otherwise.
+
+        score_type : {"test", "train", "both"}, default="both"
+            The type of score to plot. Can be one of `"test"`, `"train"`, or
+            `"both"`.
+
+        std_display_style : {"errorbar", "fill_between"} or None, default="fill_between"
+            The style used to display the score standard deviation around the
+            mean score. If `None`, no representation of the standard deviation
+            is displayed.
+
+        line_kw : dict, default=None
+            Additional keyword arguments passed to the `plt.plot` used to draw
+            the mean score.
+
+        fill_between_kw : dict, default=None
+            Additional keyword arguments passed to the `plt.fill_between` used
+            to draw the score standard deviation.
+
+        errorbar_kw : dict, default=None
+            Additional keyword arguments passed to the `plt.errorbar` used to
+            draw mean score and standard deviation score.
+
+        Returns
+        -------
+        display : :class:`~sklearn.model_selection.ValidationCurveDisplay`
+            Object that stores computed values.
+
+        Examples
+        --------
+        >>> import numpy as np
+        >>> import matplotlib.pyplot as plt
+        >>> from sklearn.datasets import make_classification
+        >>> from sklearn.model_selection import ValidationCurveDisplay
+        >>> from sklearn.linear_model import LogisticRegression
+        >>> X, y = make_classification(n_samples=1_000, random_state=0)
+        >>> logistic_regression = LogisticRegression()
+        >>> param_name, param_range = "C", np.logspace(-8, 3, 10)
+        >>> ValidationCurveDisplay.from_estimator(
+        ...     logistic_regression, X, y, param_name=param_name,
+        ...     param_range=param_range,
+        ... )
+        <...>
+        >>> plt.show()
+        """
+        check_matplotlib_support(f"{cls.__name__}.from_estimator")
+
+        score_name = _validate_score_name(score_name, scoring, negate_score)
+
+        train_scores, test_scores = validation_curve(
+            estimator,
+            X,
+            y,
+            param_name=param_name,
+            param_range=param_range,
+            groups=groups,
+            cv=cv,
+            scoring=scoring,
+            n_jobs=n_jobs,
+            pre_dispatch=pre_dispatch,
+            verbose=verbose,
+            error_score=error_score,
+            fit_params=fit_params,
+        )
+
+        viz = cls(
+            param_name=param_name,
+            param_range=np.asarray(param_range),
+            train_scores=train_scores,
+            test_scores=test_scores,
+            score_name=score_name,
+        )
+        return viz.plot(
+            ax=ax,
+            negate_score=negate_score,
+            score_type=score_type,
+            std_display_style=std_display_style,
+            line_kw=line_kw,
+            fill_between_kw=fill_between_kw,
+            errorbar_kw=errorbar_kw,
+        )
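
For reference, a short usage sketch of the display API added above (not part of the commit; assumes matplotlib and scikit-learn are installed). `std_display_style` switches the one-standard-deviation band between a shaded region ("fill_between", the default) and error bars, and `_plot_curve` picks a log x-scale automatically when the spacing of the x values is strongly uneven:

import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.model_selection import LearningCurveDisplay
from sklearn.tree import DecisionTreeClassifier

X, y = load_iris(return_X_y=True)
tree = DecisionTreeClassifier(random_state=0)

# Draw train and test curves with error bars instead of the default band.
display = LearningCurveDisplay.from_estimator(
    tree, X, y, score_type="both", std_display_style="errorbar"
)
display.ax_.set_title("Learning curve")  # ax_ is set by _plot_curve above
plt.show()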
env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/_search.py ADDED
@@ -0,0 +1,1918 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ The :mod:`sklearn.model_selection._search` includes utilities to fine-tune the
3
+ parameters of an estimator.
4
+ """
5
+
6
+ # Author: Alexandre Gramfort <[email protected]>,
7
+ # Gael Varoquaux <[email protected]>
8
+ # Andreas Mueller <[email protected]>
9
+ # Olivier Grisel <[email protected]>
10
+ # Raghav RV <[email protected]>
11
+ # License: BSD 3 clause
12
+
13
+ import numbers
14
+ import operator
15
+ import time
16
+ import warnings
17
+ from abc import ABCMeta, abstractmethod
18
+ from collections import defaultdict
19
+ from collections.abc import Iterable, Mapping, Sequence
20
+ from functools import partial, reduce
21
+ from itertools import product
22
+
23
+ import numpy as np
24
+ from numpy.ma import MaskedArray
25
+ from scipy.stats import rankdata
26
+
27
+ from ..base import BaseEstimator, MetaEstimatorMixin, _fit_context, clone, is_classifier
28
+ from ..exceptions import NotFittedError
29
+ from ..metrics import check_scoring
30
+ from ..metrics._scorer import (
31
+ _check_multimetric_scoring,
32
+ _MultimetricScorer,
33
+ get_scorer_names,
34
+ )
35
+ from ..utils import Bunch, check_random_state
36
+ from ..utils._param_validation import HasMethods, Interval, StrOptions
37
+ from ..utils._tags import _safe_tags
38
+ from ..utils.metadata_routing import (
39
+ MetadataRouter,
40
+ MethodMapping,
41
+ _raise_for_params,
42
+ _routing_enabled,
43
+ process_routing,
44
+ )
45
+ from ..utils.metaestimators import available_if
46
+ from ..utils.parallel import Parallel, delayed
47
+ from ..utils.random import sample_without_replacement
48
+ from ..utils.validation import _check_method_params, check_is_fitted, indexable
49
+ from ._split import check_cv
50
+ from ._validation import (
51
+ _aggregate_score_dicts,
52
+ _fit_and_score,
53
+ _insert_error_scores,
54
+ _normalize_score_results,
55
+ _warn_or_raise_about_fit_failures,
56
+ )
57
+
58
+ __all__ = ["GridSearchCV", "ParameterGrid", "ParameterSampler", "RandomizedSearchCV"]
59
+
60
+
61
+ class ParameterGrid:
62
+ """Grid of parameters with a discrete number of values for each.
63
+
64
+ Can be used to iterate over parameter value combinations with the
65
+ Python built-in function iter.
66
+ The order of the generated parameter combinations is deterministic.
67
+
68
+ Read more in the :ref:`User Guide <grid_search>`.
69
+
70
+ Parameters
71
+ ----------
72
+ param_grid : dict of str to sequence, or sequence of such
73
+ The parameter grid to explore, as a dictionary mapping estimator
74
+ parameters to sequences of allowed values.
75
+
76
+ An empty dict signifies default parameters.
77
+
78
+ A sequence of dicts signifies a sequence of grids to search, and is
79
+ useful to avoid exploring parameter combinations that make no sense
80
+ or have no effect. See the examples below.
81
+
82
+ Examples
83
+ --------
84
+ >>> from sklearn.model_selection import ParameterGrid
85
+ >>> param_grid = {'a': [1, 2], 'b': [True, False]}
86
+ >>> list(ParameterGrid(param_grid)) == (
87
+ ... [{'a': 1, 'b': True}, {'a': 1, 'b': False},
88
+ ... {'a': 2, 'b': True}, {'a': 2, 'b': False}])
89
+ True
90
+
91
+ >>> grid = [{'kernel': ['linear']}, {'kernel': ['rbf'], 'gamma': [1, 10]}]
92
+ >>> list(ParameterGrid(grid)) == [{'kernel': 'linear'},
93
+ ... {'kernel': 'rbf', 'gamma': 1},
94
+ ... {'kernel': 'rbf', 'gamma': 10}]
95
+ True
96
+ >>> ParameterGrid(grid)[1] == {'kernel': 'rbf', 'gamma': 1}
97
+ True
98
+
99
+ See Also
100
+ --------
101
+ GridSearchCV : Uses :class:`ParameterGrid` to perform a full parallelized
102
+ parameter search.
103
+ """
104
+
105
+ def __init__(self, param_grid):
106
+ if not isinstance(param_grid, (Mapping, Iterable)):
107
+ raise TypeError(
108
+ f"Parameter grid should be a dict or a list, got: {param_grid!r} of"
109
+ f" type {type(param_grid).__name__}"
110
+ )
111
+
112
+ if isinstance(param_grid, Mapping):
113
+ # wrap dictionary in a singleton list to support either dict
114
+ # or list of dicts
115
+ param_grid = [param_grid]
116
+
117
+ # check if all entries are dictionaries of lists
118
+ for grid in param_grid:
119
+ if not isinstance(grid, dict):
120
+ raise TypeError(f"Parameter grid is not a dict ({grid!r})")
121
+ for key, value in grid.items():
122
+ if isinstance(value, np.ndarray) and value.ndim > 1:
123
+ raise ValueError(
124
+ f"Parameter array for {key!r} should be one-dimensional, got:"
125
+ f" {value!r} with shape {value.shape}"
126
+ )
127
+ if isinstance(value, str) or not isinstance(
128
+ value, (np.ndarray, Sequence)
129
+ ):
130
+ raise TypeError(
131
+ f"Parameter grid for parameter {key!r} needs to be a list or a"
132
+ f" numpy array, but got {value!r} (of type "
133
+ f"{type(value).__name__}) instead. Single values "
134
+ "need to be wrapped in a list with one element."
135
+ )
136
+ if len(value) == 0:
137
+ raise ValueError(
138
+ f"Parameter grid for parameter {key!r} need "
139
+ f"to be a non-empty sequence, got: {value!r}"
140
+ )
141
+
142
+ self.param_grid = param_grid
143
+
144
+ def __iter__(self):
145
+ """Iterate over the points in the grid.
146
+
147
+ Returns
148
+ -------
149
+ params : iterator over dict of str to any
150
+ Yields dictionaries mapping each estimator parameter to one of its
151
+ allowed values.
152
+ """
153
+ for p in self.param_grid:
154
+ # Always sort the keys of a dictionary, for reproducibility
155
+ items = sorted(p.items())
156
+ if not items:
157
+ yield {}
158
+ else:
159
+ keys, values = zip(*items)
160
+ for v in product(*values):
161
+ params = dict(zip(keys, v))
162
+ yield params
163
+
164
+ def __len__(self):
165
+ """Number of points on the grid."""
166
+ # Product function that can handle iterables (np.prod can't).
167
+ product = partial(reduce, operator.mul)
168
+ return sum(
169
+ product(len(v) for v in p.values()) if p else 1 for p in self.param_grid
170
+ )
171
+
172
+ def __getitem__(self, ind):
173
+ """Get the parameters that would be ``ind``th in iteration
174
+
175
+ Parameters
176
+ ----------
177
+ ind : int
178
+ The iteration index
179
+
180
+ Returns
181
+ -------
182
+ params : dict of str to any
183
+ Equal to list(self)[ind]
184
+ """
185
+ # This is used to make discrete sampling without replacement memory
186
+ # efficient.
187
+ for sub_grid in self.param_grid:
188
+ # XXX: could memoize information used here
189
+ if not sub_grid:
190
+ if ind == 0:
191
+ return {}
192
+ else:
193
+ ind -= 1
194
+ continue
195
+
196
+ # Reverse so most frequent cycling parameter comes first
197
+ keys, values_lists = zip(*sorted(sub_grid.items())[::-1])
198
+ sizes = [len(v_list) for v_list in values_lists]
199
+ total = np.prod(sizes)
200
+
201
+ if ind >= total:
202
+ # Try the next grid
203
+ ind -= total
204
+ else:
205
+ out = {}
206
+ for key, v_list, n in zip(keys, values_lists, sizes):
207
+ ind, offset = divmod(ind, n)
208
+ out[key] = v_list[offset]
209
+ return out
210
+
211
+ raise IndexError("ParameterGrid index out of range")
212
+
213
+
214
+ class ParameterSampler:
215
+ """Generator on parameters sampled from given distributions.
216
+
217
+ Non-deterministic iterable over random candidate combinations for hyper-
218
+ parameter search. If all parameters are presented as a list,
219
+ sampling without replacement is performed. If at least one parameter
220
+ is given as a distribution, sampling with replacement is used.
221
+ It is highly recommended to use continuous distributions for continuous
222
+ parameters.
223
+
224
+ Read more in the :ref:`User Guide <grid_search>`.
225
+
226
+ Parameters
227
+ ----------
228
+ param_distributions : dict
229
+ Dictionary with parameters names (`str`) as keys and distributions
230
+ or lists of parameters to try. Distributions must provide a ``rvs``
231
+ method for sampling (such as those from scipy.stats.distributions).
232
+ If a list is given, it is sampled uniformly.
233
+ If a list of dicts is given, first a dict is sampled uniformly, and
234
+ then a parameter is sampled using that dict as above.
235
+
236
+ n_iter : int
237
+ Number of parameter settings that are produced.
238
+
239
+ random_state : int, RandomState instance or None, default=None
240
+ Pseudo random number generator state used for random uniform sampling
241
+ from lists of possible values instead of scipy.stats distributions.
242
+ Pass an int for reproducible output across multiple
243
+ function calls.
244
+ See :term:`Glossary <random_state>`.
245
+
246
+ Returns
247
+ -------
248
+ params : dict of str to any
249
+ **Yields** dictionaries mapping each estimator parameter to
250
+ a sampled value.
251
+
252
+ Examples
253
+ --------
254
+ >>> from sklearn.model_selection import ParameterSampler
255
+ >>> from scipy.stats.distributions import expon
256
+ >>> import numpy as np
257
+ >>> rng = np.random.RandomState(0)
258
+ >>> param_grid = {'a':[1, 2], 'b': expon()}
259
+ >>> param_list = list(ParameterSampler(param_grid, n_iter=4,
260
+ ... random_state=rng))
261
+ >>> rounded_list = [dict((k, round(v, 6)) for (k, v) in d.items())
262
+ ... for d in param_list]
263
+ >>> rounded_list == [{'b': 0.89856, 'a': 1},
264
+ ... {'b': 0.923223, 'a': 1},
265
+ ... {'b': 1.878964, 'a': 2},
266
+ ... {'b': 1.038159, 'a': 2}]
267
+ True
268
+ """
269
+
270
+ def __init__(self, param_distributions, n_iter, *, random_state=None):
271
+ if not isinstance(param_distributions, (Mapping, Iterable)):
272
+ raise TypeError(
273
+ "Parameter distribution is not a dict or a list,"
274
+ f" got: {param_distributions!r} of type "
275
+ f"{type(param_distributions).__name__}"
276
+ )
277
+
278
+ if isinstance(param_distributions, Mapping):
279
+ # wrap dictionary in a singleton list to support either dict
280
+ # or list of dicts
281
+ param_distributions = [param_distributions]
282
+
283
+ for dist in param_distributions:
284
+ if not isinstance(dist, dict):
285
+ raise TypeError(
286
+ "Parameter distribution is not a dict ({!r})".format(dist)
287
+ )
288
+ for key in dist:
289
+ if not isinstance(dist[key], Iterable) and not hasattr(
290
+ dist[key], "rvs"
291
+ ):
292
+ raise TypeError(
293
+ f"Parameter grid for parameter {key!r} is not iterable "
294
+ f"or a distribution (value={dist[key]})"
295
+ )
296
+ self.n_iter = n_iter
297
+ self.random_state = random_state
298
+ self.param_distributions = param_distributions
299
+
300
+ def _is_all_lists(self):
301
+ return all(
302
+ all(not hasattr(v, "rvs") for v in dist.values())
303
+ for dist in self.param_distributions
304
+ )
305
+
306
+ def __iter__(self):
307
+ rng = check_random_state(self.random_state)
308
+
309
+ # if all distributions are given as lists, we want to sample without
310
+ # replacement
311
+ if self._is_all_lists():
312
+ # look up sampled parameter settings in parameter grid
313
+ param_grid = ParameterGrid(self.param_distributions)
314
+ grid_size = len(param_grid)
315
+ n_iter = self.n_iter
316
+
317
+ if grid_size < n_iter:
318
+ warnings.warn(
319
+ "The total space of parameters %d is smaller "
320
+ "than n_iter=%d. Running %d iterations. For exhaustive "
321
+ "searches, use GridSearchCV." % (grid_size, self.n_iter, grid_size),
322
+ UserWarning,
323
+ )
324
+ n_iter = grid_size
325
+ for i in sample_without_replacement(grid_size, n_iter, random_state=rng):
326
+ yield param_grid[i]
327
+
328
+ else:
329
+ for _ in range(self.n_iter):
330
+ dist = rng.choice(self.param_distributions)
331
+ # Always sort the keys of a dictionary, for reproducibility
332
+ items = sorted(dist.items())
333
+ params = dict()
334
+ for k, v in items:
335
+ if hasattr(v, "rvs"):
336
+ params[k] = v.rvs(random_state=rng)
337
+ else:
338
+ params[k] = v[rng.randint(len(v))]
339
+ yield params
340
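+ # Editorial sketch: with list-only distributions the branch above samples
+ # without replacement and never yields more than the grid size (a
+ # UserWarning is emitted when n_iter exceeds it):
+ #
+ #     >>> sampler = ParameterSampler({'a': [1, 2]}, n_iter=5, random_state=0)
+ #     >>> len(sampler)  # min(n_iter, grid size)
+ #     2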
+
341
+ def __len__(self):
342
+ """Number of points that will be sampled."""
343
+ if self._is_all_lists():
344
+ grid_size = len(ParameterGrid(self.param_distributions))
345
+ return min(self.n_iter, grid_size)
346
+ else:
347
+ return self.n_iter
348
+
349
+
350
+ def _check_refit(search_cv, attr):
351
+ if not search_cv.refit:
352
+ raise AttributeError(
353
+ f"This {type(search_cv).__name__} instance was initialized with "
354
+ f"`refit=False`. {attr} is available only after refitting on the best "
355
+ "parameters. You can refit an estimator manually using the "
356
+ "`best_params_` attribute"
357
+ )
358
+
359
+
360
+ def _estimator_has(attr):
361
+ """Check if we can delegate a method to the underlying estimator.
362
+
363
+ Calling a prediction method will only be available if `refit=True`. In
364
+ such case, we check first the fitted best estimator. If it is not
365
+ fitted, we check the unfitted estimator.
366
+
367
+ Checking the unfitted estimator allows using `hasattr` on the `SearchCV`
368
+ instance even before calling `fit`.
369
+ """
370
+
371
+ def check(self):
372
+ _check_refit(self, attr)
373
+ if hasattr(self, "best_estimator_"):
374
+ # raise an AttributeError if `attr` does not exist
375
+ getattr(self.best_estimator_, attr)
376
+ return True
377
+ # raise an AttributeError if `attr` does not exist
378
+ getattr(self.estimator, attr)
379
+ return True
380
+
381
+ return check
382
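+ # Editorial sketch: combined with `available_if`, the check above makes
+ # `hasattr` reflect the wrapped estimator even before `fit`. For example,
+ # SVC only exposes `predict_proba` when `probability=True`, so (assuming
+ # current scikit-learn behavior):
+ #
+ #     >>> from sklearn.svm import SVC
+ #     >>> hasattr(GridSearchCV(SVC(), {'C': [1]}), 'predict_proba')
+ #     False
+ #     >>> hasattr(GridSearchCV(SVC(probability=True), {'C': [1]}), 'predict_proba')
+ #     True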
+
383
+
384
+ class BaseSearchCV(MetaEstimatorMixin, BaseEstimator, metaclass=ABCMeta):
385
+ """Abstract base class for hyper parameter search with cross-validation."""
386
+
387
+ _parameter_constraints: dict = {
388
+ "estimator": [HasMethods(["fit"])],
389
+ "scoring": [
390
+ StrOptions(set(get_scorer_names())),
391
+ callable,
392
+ list,
393
+ tuple,
394
+ dict,
395
+ None,
396
+ ],
397
+ "n_jobs": [numbers.Integral, None],
398
+ "refit": ["boolean", str, callable],
399
+ "cv": ["cv_object"],
400
+ "verbose": ["verbose"],
401
+ "pre_dispatch": [numbers.Integral, str],
402
+ "error_score": [StrOptions({"raise"}), numbers.Real],
403
+ "return_train_score": ["boolean"],
404
+ }
405
+
406
+ @abstractmethod
407
+ def __init__(
408
+ self,
409
+ estimator,
410
+ *,
411
+ scoring=None,
412
+ n_jobs=None,
413
+ refit=True,
414
+ cv=None,
415
+ verbose=0,
416
+ pre_dispatch="2*n_jobs",
417
+ error_score=np.nan,
418
+ return_train_score=True,
419
+ ):
420
+ self.scoring = scoring
421
+ self.estimator = estimator
422
+ self.n_jobs = n_jobs
423
+ self.refit = refit
424
+ self.cv = cv
425
+ self.verbose = verbose
426
+ self.pre_dispatch = pre_dispatch
427
+ self.error_score = error_score
428
+ self.return_train_score = return_train_score
429
+
430
+ @property
431
+ def _estimator_type(self):
432
+ return self.estimator._estimator_type
433
+
434
+ def _more_tags(self):
435
+ # allows cross-validation to see 'precomputed' metrics
436
+ return {
437
+ "pairwise": _safe_tags(self.estimator, "pairwise"),
438
+ "_xfail_checks": {
439
+ "check_supervised_y_2d": "DataConversionWarning not caught"
440
+ },
441
+ }
442
+
443
+ def score(self, X, y=None, **params):
444
+ """Return the score on the given data, if the estimator has been refit.
445
+
446
+ This uses the score defined by ``scoring`` where provided, and the
447
+ ``best_estimator_.score`` method otherwise.
448
+
449
+ Parameters
450
+ ----------
451
+ X : array-like of shape (n_samples, n_features)
452
+ Input data, where `n_samples` is the number of samples and
453
+ `n_features` is the number of features.
454
+
455
+ y : array-like of shape (n_samples, n_output) \
456
+ or (n_samples,), default=None
457
+ Target relative to X for classification or regression;
458
+ None for unsupervised learning.
459
+
460
+ **params : dict
461
+ Parameters to be passed to the underlying scorer(s).
462
+
463
+ .. versionadded:: 1.4
464
+ Only available if `enable_metadata_routing=True`. See
465
+ :ref:`Metadata Routing User Guide <metadata_routing>` for more
466
+ details.
467
+
468
+ Returns
469
+ -------
470
+ score : float
471
+ The score defined by ``scoring`` if provided, and the
472
+ ``best_estimator_.score`` method otherwise.
473
+ """
474
+ _check_refit(self, "score")
475
+ check_is_fitted(self)
476
+
477
+ _raise_for_params(params, self, "score")
478
+
479
+ if _routing_enabled():
480
+ score_params = process_routing(self, "score", **params).scorer["score"]
481
+ else:
482
+ score_params = dict()
483
+
484
+ if self.scorer_ is None:
485
+ raise ValueError(
486
+ "No score function explicitly defined, "
487
+ "and the estimator doesn't provide one %s"
488
+ % self.best_estimator_
489
+ )
490
+ if isinstance(self.scorer_, dict):
491
+ if self.multimetric_:
492
+ scorer = self.scorer_[self.refit]
493
+ else:
494
+ scorer = self.scorer_
495
+ return scorer(self.best_estimator_, X, y, **score_params)
496
+
497
+ # callable
498
+ score = self.scorer_(self.best_estimator_, X, y, **score_params)
499
+ if self.multimetric_:
500
+ score = score[self.refit]
501
+ return score
502
+
503
+ @available_if(_estimator_has("score_samples"))
504
+ def score_samples(self, X):
505
+ """Call score_samples on the estimator with the best found parameters.
506
+
507
+ Only available if ``refit=True`` and the underlying estimator supports
508
+ ``score_samples``.
509
+
510
+ .. versionadded:: 0.24
511
+
512
+ Parameters
513
+ ----------
514
+ X : iterable
515
+ Data to predict on. Must fulfill input requirements
516
+ of the underlying estimator.
517
+
518
+ Returns
519
+ -------
520
+ y_score : ndarray of shape (n_samples,)
521
+ The ``best_estimator_.score_samples`` method.
522
+ """
523
+ check_is_fitted(self)
524
+ return self.best_estimator_.score_samples(X)
525
+
526
+ @available_if(_estimator_has("predict"))
527
+ def predict(self, X):
528
+ """Call predict on the estimator with the best found parameters.
529
+
530
+ Only available if ``refit=True`` and the underlying estimator supports
531
+ ``predict``.
532
+
533
+ Parameters
534
+ ----------
535
+ X : indexable, length n_samples
536
+ Must fulfill the input assumptions of the
537
+ underlying estimator.
538
+
539
+ Returns
540
+ -------
541
+ y_pred : ndarray of shape (n_samples,)
542
+ The predicted labels or values for `X` based on the estimator with
543
+ the best found parameters.
544
+ """
545
+ check_is_fitted(self)
546
+ return self.best_estimator_.predict(X)
547
+
548
+ @available_if(_estimator_has("predict_proba"))
549
+ def predict_proba(self, X):
550
+ """Call predict_proba on the estimator with the best found parameters.
551
+
552
+ Only available if ``refit=True`` and the underlying estimator supports
553
+ ``predict_proba``.
554
+
555
+ Parameters
556
+ ----------
557
+ X : indexable, length n_samples
558
+ Must fulfill the input assumptions of the
559
+ underlying estimator.
560
+
561
+ Returns
562
+ -------
563
+ y_pred : ndarray of shape (n_samples,) or (n_samples, n_classes)
564
+ Predicted class probabilities for `X` based on the estimator with
565
+ the best found parameters. The order of the classes corresponds
566
+ to that in the fitted attribute :term:`classes_`.
567
+ """
568
+ check_is_fitted(self)
569
+ return self.best_estimator_.predict_proba(X)
570
+
571
+ @available_if(_estimator_has("predict_log_proba"))
572
+ def predict_log_proba(self, X):
573
+ """Call predict_log_proba on the estimator with the best found parameters.
574
+
575
+ Only available if ``refit=True`` and the underlying estimator supports
576
+ ``predict_log_proba``.
577
+
578
+ Parameters
579
+ ----------
580
+ X : indexable, length n_samples
581
+ Must fulfill the input assumptions of the
582
+ underlying estimator.
583
+
584
+ Returns
585
+ -------
586
+ y_pred : ndarray of shape (n_samples,) or (n_samples, n_classes)
587
+ Predicted class log-probabilities for `X` based on the estimator
588
+ with the best found parameters. The order of the classes
589
+ corresponds to that in the fitted attribute :term:`classes_`.
590
+ """
591
+ check_is_fitted(self)
592
+ return self.best_estimator_.predict_log_proba(X)
593
+
594
+ @available_if(_estimator_has("decision_function"))
595
+ def decision_function(self, X):
596
+ """Call decision_function on the estimator with the best found parameters.
597
+
598
+ Only available if ``refit=True`` and the underlying estimator supports
599
+ ``decision_function``.
600
+
601
+ Parameters
602
+ ----------
603
+ X : indexable, length n_samples
604
+ Must fulfill the input assumptions of the
605
+ underlying estimator.
606
+
607
+ Returns
608
+ -------
609
+ y_score : ndarray of shape (n_samples,) or (n_samples, n_classes) \
610
+ or (n_samples, n_classes * (n_classes-1) / 2)
611
+ Result of the decision function for `X` based on the estimator with
612
+ the best found parameters.
613
+ """
614
+ check_is_fitted(self)
615
+ return self.best_estimator_.decision_function(X)
616
+
617
+ @available_if(_estimator_has("transform"))
618
+ def transform(self, X):
619
+ """Call transform on the estimator with the best found parameters.
620
+
621
+ Only available if the underlying estimator supports ``transform`` and
622
+ ``refit=True``.
623
+
624
+ Parameters
625
+ ----------
626
+ X : indexable, length n_samples
627
+ Must fulfill the input assumptions of the
628
+ underlying estimator.
629
+
630
+ Returns
631
+ -------
632
+ Xt : {ndarray, sparse matrix} of shape (n_samples, n_features)
633
+ `X` transformed in the new space based on the estimator with
634
+ the best found parameters.
635
+ """
636
+ check_is_fitted(self)
637
+ return self.best_estimator_.transform(X)
638
+
639
+ @available_if(_estimator_has("inverse_transform"))
640
+ def inverse_transform(self, Xt):
641
+ """Call inverse_transform on the estimator with the best found params.
642
+
643
+ Only available if the underlying estimator implements
644
+ ``inverse_transform`` and ``refit=True``.
645
+
646
+ Parameters
647
+ ----------
648
+ Xt : indexable, length n_samples
649
+ Must fulfill the input assumptions of the
650
+ underlying estimator.
651
+
652
+ Returns
653
+ -------
654
+ X : {ndarray, sparse matrix} of shape (n_samples, n_features)
655
+ Result of the `inverse_transform` function for `Xt` based on the
656
+ estimator with the best found parameters.
657
+ """
658
+ check_is_fitted(self)
659
+ return self.best_estimator_.inverse_transform(Xt)
660
+
661
+ @property
662
+ def n_features_in_(self):
663
+ """Number of features seen during :term:`fit`.
664
+
665
+ Only available when `refit=True`.
666
+ """
667
+ # For consistency with other estimators we raise an AttributeError so
668
+ # that hasattr() fails if the search estimator isn't fitted.
669
+ try:
670
+ check_is_fitted(self)
671
+ except NotFittedError as nfe:
672
+ raise AttributeError(
673
+ "{} object has no n_features_in_ attribute.".format(
674
+ self.__class__.__name__
675
+ )
676
+ ) from nfe
677
+
678
+ return self.best_estimator_.n_features_in_
679
+
680
+ @property
681
+ def classes_(self):
682
+ """Class labels.
683
+
684
+ Only available when `refit=True` and the estimator is a classifier.
685
+ """
686
+ _estimator_has("classes_")(self)
687
+ return self.best_estimator_.classes_
688
+
689
+ def _run_search(self, evaluate_candidates):
690
+ """Repeatedly calls `evaluate_candidates` to conduct a search.
691
+
692
+ This method, implemented in sub-classes, makes it possible to
693
+ customize the scheduling of evaluations: GridSearchCV and
694
+ RandomizedSearchCV schedule evaluations for their whole parameter
695
+ search space at once but other more sequential approaches are also
696
+ possible: for instance, it is possible to iteratively schedule evaluations
697
+ for new regions of the parameter search space based on previously
698
+ collected evaluation results. This makes it possible to implement
699
+ Bayesian optimization or more generally sequential model-based
700
+ optimization by deriving from the BaseSearchCV abstract base class.
701
+ For example, Successive Halving is implemented by calling
702
+ `evaluate_candidates` multiple times (once per iteration of the SH
703
+ process), each time passing a different set of candidates with `X`
704
+ and `y` of increasing sizes.
705
+
706
+ Parameters
707
+ ----------
708
+ evaluate_candidates : callable
709
+ This callback accepts:
710
+ - a list of candidates, where each candidate is a dict of
711
+ parameter settings.
712
+ - an optional `cv` parameter which can be used to e.g.
713
+ evaluate candidates on different dataset splits, or
714
+ evaluate candidates on subsampled data (as done in the
715
+ SuccessiveHalving estimators). By default, the original `cv`
716
+ parameter is used, and it is available as a private
717
+ `_checked_cv_orig` attribute.
718
+ - an optional `more_results` dict. Each key will be added to
719
+ the `cv_results_` attribute. Values should be lists of
720
+ length `n_candidates`
721
+
722
+ It returns a dict of all results so far, formatted like
723
+ ``cv_results_``.
724
+
725
+ Important note (relevant whether the default cv is used or not):
726
+ in randomized splitters, and unless the random_state parameter of
727
+ cv was set to an int, calling cv.split() multiple times will
728
+ yield different splits. Since cv.split() is called in
729
+ evaluate_candidates, this means that candidates will be evaluated
730
+ on different splits each time evaluate_candidates is called. This
731
+ might be a methodological issue depending on the search strategy
732
+ that you're implementing. To prevent randomized splitters from
733
+ being used, you may use _split._yields_constant_splits()
734
+
735
+ Examples
736
+ --------
737
+
738
+ ::
739
+
740
+ def _run_search(self, evaluate_candidates):
741
+ 'Try C=0.1 only if C=1 is better than C=10'
742
+ all_results = evaluate_candidates([{'C': 1}, {'C': 10}])
743
+ score = all_results['mean_test_score']
744
+ if score[0] < score[1]:
745
+ evaluate_candidates([{'C': 0.1}])
746
+ """
747
+ raise NotImplementedError("_run_search not implemented.")
748
+
749
+ def _check_refit_for_multimetric(self, scores):
750
+ """Check `refit` is compatible with `scores` is valid"""
751
+ multimetric_refit_msg = (
752
+ "For multi-metric scoring, the parameter refit must be set to a "
753
+ "scorer key or a callable to refit an estimator with the best "
754
+ "parameter setting on the whole data and make the best_* "
755
+ "attributes available for that metric. If this is not needed, "
756
+ f"refit should be set to False explicitly. {self.refit!r} was "
757
+ "passed."
758
+ )
759
+
760
+ valid_refit_dict = isinstance(self.refit, str) and self.refit in scores
761
+
762
+ if (
763
+ self.refit is not False
764
+ and not valid_refit_dict
765
+ and not callable(self.refit)
766
+ ):
767
+ raise ValueError(multimetric_refit_msg)
768
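+ # Editorial sketch: a valid multi-metric configuration therefore names one
+ # of the scorer keys in `refit` (or passes a callable / False), e.g.:
+ #
+ #     GridSearchCV(estimator, param_grid,
+ #                  scoring={'acc': 'accuracy', 'bacc': 'balanced_accuracy'},
+ #                  refit='acc')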
+
769
+ @staticmethod
770
+ def _select_best_index(refit, refit_metric, results):
771
+ """Select index of the best combination of hyperparemeters."""
772
+ if callable(refit):
773
+ # If callable, refit is expected to return the index of the best
774
+ # parameter set.
775
+ best_index = refit(results)
776
+ if not isinstance(best_index, numbers.Integral):
777
+ raise TypeError("best_index_ returned is not an integer")
778
+ if best_index < 0 or best_index >= len(results["params"]):
779
+ raise IndexError("best_index_ index out of range")
780
+ else:
781
+ best_index = results[f"rank_test_{refit_metric}"].argmin()
782
+ return best_index
783
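+ # Editorial sketch of a custom `refit` callable: it receives `cv_results_`
+ # and must return an integer index, e.g. the fastest candidate within one
+ # standard deviation of the best mean test score (hypothetical helper):
+ #
+ #     def refit_within_one_std(cv_results):
+ #         mean = np.asarray(cv_results["mean_test_score"])
+ #         std = np.asarray(cv_results["std_test_score"])
+ #         ok = np.flatnonzero(mean >= mean.max() - std[mean.argmax()])
+ #         fit_time = np.asarray(cv_results["mean_fit_time"])
+ #         return int(ok[fit_time[ok].argmin()])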
+
784
+ def _get_scorers(self, convert_multimetric):
785
+ """Get the scorer(s) to be used.
786
+
787
+ This is used in ``fit`` and ``get_metadata_routing``.
788
+
789
+ Parameters
790
+ ----------
791
+ convert_multimetric : bool
792
+ Whether to convert a dict of scorers to a _MultimetricScorer. This
793
+ is used in ``get_metadata_routing`` to include the routing info for
794
+ multiple scorers.
795
+
796
+ Returns
797
+ -------
798
+ scorers, refit_metric
799
+ """
800
+ refit_metric = "score"
801
+
802
+ if callable(self.scoring):
803
+ scorers = self.scoring
804
+ elif self.scoring is None or isinstance(self.scoring, str):
805
+ scorers = check_scoring(self.estimator, self.scoring)
806
+ else:
807
+ scorers = _check_multimetric_scoring(self.estimator, self.scoring)
808
+ self._check_refit_for_multimetric(scorers)
809
+ refit_metric = self.refit
810
+ if convert_multimetric and isinstance(scorers, dict):
811
+ scorers = _MultimetricScorer(
812
+ scorers=scorers, raise_exc=(self.error_score == "raise")
813
+ )
814
+
815
+ return scorers, refit_metric
816
+
817
+ def _get_routed_params_for_fit(self, params):
818
+ """Get the parameters to be used for routing.
819
+
820
+ This is a method instead of a snippet in ``fit`` since it's used twice,
821
+ here in ``fit``, and in ``HalvingRandomSearchCV.fit``.
822
+ """
823
+ if _routing_enabled():
824
+ routed_params = process_routing(self, "fit", **params)
825
+ else:
826
+ params = params.copy()
827
+ groups = params.pop("groups", None)
828
+ routed_params = Bunch(
829
+ estimator=Bunch(fit=params),
830
+ splitter=Bunch(split={"groups": groups}),
831
+ scorer=Bunch(score={}),
832
+ )
833
+ return routed_params
834
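+ # Editorial sketch: without routing enabled, only `groups` is pulled out
+ # for the splitter and the rest is forwarded to `estimator.fit`, so a
+ # grouped split looks like (X, y, groups being placeholder arrays):
+ #
+ #     from sklearn.model_selection import GroupKFold
+ #     search = GridSearchCV(estimator, param_grid, cv=GroupKFold(2))
+ #     search.fit(X, y, groups=groups)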
+
835
+ @_fit_context(
836
+ # *SearchCV.estimator is not validated yet
837
+ prefer_skip_nested_validation=False
838
+ )
839
+ def fit(self, X, y=None, **params):
840
+ """Run fit with all sets of parameters.
841
+
842
+ Parameters
843
+ ----------
844
+
845
+ X : array-like of shape (n_samples, n_features)
846
+ Training vector, where `n_samples` is the number of samples and
847
+ `n_features` is the number of features.
848
+
849
+ y : array-like of shape (n_samples, n_output) \
850
+ or (n_samples,), default=None
851
+ Target relative to X for classification or regression;
852
+ None for unsupervised learning.
853
+
854
+ **params : dict of str -> object
855
+ Parameters passed to the ``fit`` method of the estimator, the scorer,
856
+ and the CV splitter.
857
+
858
+ If a fit parameter is an array-like whose length is equal to
859
+ `num_samples` then it will be split across CV groups along with `X`
860
+ and `y`. For example, the :term:`sample_weight` parameter is split
861
+ because `len(sample_weight) == len(X)`.
862
+
863
+ Returns
864
+ -------
865
+ self : object
866
+ Instance of fitted estimator.
867
+ """
868
+ estimator = self.estimator
869
+ # Here we keep a dict of scorers as is, and only convert to a
870
+ # _MultimetricScorer at a later stage. Issue:
871
+ # https://github.com/scikit-learn/scikit-learn/issues/27001
872
+ scorers, refit_metric = self._get_scorers(convert_multimetric=False)
873
+
874
+ X, y = indexable(X, y)
875
+ params = _check_method_params(X, params=params)
876
+
877
+ routed_params = self._get_routed_params_for_fit(params)
878
+
879
+ cv_orig = check_cv(self.cv, y, classifier=is_classifier(estimator))
880
+ n_splits = cv_orig.get_n_splits(X, y, **routed_params.splitter.split)
881
+
882
+ base_estimator = clone(self.estimator)
883
+
884
+ parallel = Parallel(n_jobs=self.n_jobs, pre_dispatch=self.pre_dispatch)
885
+
886
+ fit_and_score_kwargs = dict(
887
+ scorer=scorers,
888
+ fit_params=routed_params.estimator.fit,
889
+ score_params=routed_params.scorer.score,
890
+ return_train_score=self.return_train_score,
891
+ return_n_test_samples=True,
892
+ return_times=True,
893
+ return_parameters=False,
894
+ error_score=self.error_score,
895
+ verbose=self.verbose,
896
+ )
897
+ results = {}
898
+ with parallel:
899
+ all_candidate_params = []
900
+ all_out = []
901
+ all_more_results = defaultdict(list)
902
+
903
+ def evaluate_candidates(candidate_params, cv=None, more_results=None):
904
+ cv = cv or cv_orig
905
+ candidate_params = list(candidate_params)
906
+ n_candidates = len(candidate_params)
907
+
908
+ if self.verbose > 0:
909
+ print(
910
+ "Fitting {0} folds for each of {1} candidates,"
911
+ " totalling {2} fits".format(
912
+ n_splits, n_candidates, n_candidates * n_splits
913
+ )
914
+ )
915
+
916
+ out = parallel(
917
+ delayed(_fit_and_score)(
918
+ clone(base_estimator),
919
+ X,
920
+ y,
921
+ train=train,
922
+ test=test,
923
+ parameters=parameters,
924
+ split_progress=(split_idx, n_splits),
925
+ candidate_progress=(cand_idx, n_candidates),
926
+ **fit_and_score_kwargs,
927
+ )
928
+ for (cand_idx, parameters), (split_idx, (train, test)) in product(
929
+ enumerate(candidate_params),
930
+ enumerate(cv.split(X, y, **routed_params.splitter.split)),
931
+ )
932
+ )
933
+
934
+ if len(out) < 1:
935
+ raise ValueError(
936
+ "No fits were performed. "
937
+ "Was the CV iterator empty? "
938
+ "Were there no candidates?"
939
+ )
940
+ elif len(out) != n_candidates * n_splits:
941
+ raise ValueError(
942
+ "cv.split and cv.get_n_splits returned "
943
+ "inconsistent results. Expected {} "
944
+ "splits, got {}".format(n_splits, len(out) // n_candidates)
945
+ )
946
+
947
+ _warn_or_raise_about_fit_failures(out, self.error_score)
948
+
949
+ # For callable self.scoring, the return type is only known after
950
+ # calling. If the return type is a dictionary, the error scores
951
+ # can now be inserted with the correct key. The type checking
952
+ # of out will be done in `_insert_error_scores`.
953
+ if callable(self.scoring):
954
+ _insert_error_scores(out, self.error_score)
955
+
956
+ all_candidate_params.extend(candidate_params)
957
+ all_out.extend(out)
958
+
959
+ if more_results is not None:
960
+ for key, value in more_results.items():
961
+ all_more_results[key].extend(value)
962
+
963
+ nonlocal results
964
+ results = self._format_results(
965
+ all_candidate_params, n_splits, all_out, all_more_results
966
+ )
967
+
968
+ return results
969
+
970
+ self._run_search(evaluate_candidates)
971
+
972
+ # multimetric is determined here because in the case of a callable
973
+ # self.scoring the return type is only known after calling
974
+ first_test_score = all_out[0]["test_scores"]
975
+ self.multimetric_ = isinstance(first_test_score, dict)
976
+
977
+ # check refit_metric now for a callable scorer that is multimetric
978
+ if callable(self.scoring) and self.multimetric_:
979
+ self._check_refit_for_multimetric(first_test_score)
980
+ refit_metric = self.refit
981
+
982
+ # For multi-metric evaluation, store the best_index_, best_params_ and
983
+ # best_score_ iff refit is one of the scorer names
984
+ # In single metric evaluation, refit_metric is "score"
985
+ if self.refit or not self.multimetric_:
986
+ self.best_index_ = self._select_best_index(
987
+ self.refit, refit_metric, results
988
+ )
989
+ if not callable(self.refit):
990
+ # When refit is not a callable, we can select the best score
991
+ # based on the best index
992
+ self.best_score_ = results[f"mean_test_{refit_metric}"][
993
+ self.best_index_
994
+ ]
995
+ self.best_params_ = results["params"][self.best_index_]
996
+
997
+ if self.refit:
998
+ # here we clone the estimator as well as the parameters, since
999
+ # sometimes the parameters themselves might be estimators, e.g.
1000
+ # when we search over different estimators in a pipeline.
1001
+ # ref: https://github.com/scikit-learn/scikit-learn/pull/26786
1002
+ self.best_estimator_ = clone(base_estimator).set_params(
1003
+ **clone(self.best_params_, safe=False)
1004
+ )
1005
+
1006
+ refit_start_time = time.time()
1007
+ if y is not None:
1008
+ self.best_estimator_.fit(X, y, **routed_params.estimator.fit)
1009
+ else:
1010
+ self.best_estimator_.fit(X, **routed_params.estimator.fit)
1011
+ refit_end_time = time.time()
1012
+ self.refit_time_ = refit_end_time - refit_start_time
1013
+
1014
+ if hasattr(self.best_estimator_, "feature_names_in_"):
1015
+ self.feature_names_in_ = self.best_estimator_.feature_names_in_
1016
+
1017
+ # Store the only scorer not as a dict for single metric evaluation
1018
+ self.scorer_ = scorers
1019
+
1020
+ self.cv_results_ = results
1021
+ self.n_splits_ = n_splits
1022
+
1023
+ return self
1024
+
1025
+ def _format_results(self, candidate_params, n_splits, out, more_results=None):
1026
+ n_candidates = len(candidate_params)
1027
+ out = _aggregate_score_dicts(out)
1028
+
1029
+ results = dict(more_results or {})
1030
+ for key, val in results.items():
1031
+ # each value is a list (as per the evaluate_candidates convention)
1032
+ # we convert it to an array for consistency with the other keys
1033
+ results[key] = np.asarray(val)
1034
+
1035
+ def _store(key_name, array, weights=None, splits=False, rank=False):
1036
+ """A small helper to store the scores/times to the cv_results_"""
1037
+ # When iterated first by splits, then by parameters
1038
+ # We want `array` to have `n_candidates` rows and `n_splits` cols.
1039
+ array = np.array(array, dtype=np.float64).reshape(n_candidates, n_splits)
1040
+ if splits:
1041
+ for split_idx in range(n_splits):
1042
+ # Uses closure to alter the results
1043
+ results["split%d_%s" % (split_idx, key_name)] = array[:, split_idx]
1044
+
1045
+ array_means = np.average(array, axis=1, weights=weights)
1046
+ results["mean_%s" % key_name] = array_means
1047
+
1048
+ if key_name.startswith(("train_", "test_")) and np.any(
1049
+ ~np.isfinite(array_means)
1050
+ ):
1051
+ warnings.warn(
1052
+ (
1053
+ f"One or more of the {key_name.split('_')[0]} scores "
1054
+ f"are non-finite: {array_means}"
1055
+ ),
1056
+ category=UserWarning,
1057
+ )
1058
+
1059
+ # Weighted std is not directly available in numpy
1060
+ array_stds = np.sqrt(
1061
+ np.average(
1062
+ (array - array_means[:, np.newaxis]) ** 2, axis=1, weights=weights
1063
+ )
1064
+ )
1065
+ results["std_%s" % key_name] = array_stds
1066
+
1067
+ if rank:
1068
+ # When the fit/scoring fails `array_means` contains NaNs, we
1069
+ # will exclude them from the ranking process and consider them
1070
+ # as tied with the worst performers.
1071
+ if np.isnan(array_means).all():
1072
+ # All fit/scoring routines failed.
1073
+ rank_result = np.ones_like(array_means, dtype=np.int32)
1074
+ else:
1075
+ min_array_means = np.nanmin(array_means) - 1
1076
+ array_means = np.nan_to_num(array_means, nan=min_array_means)
1077
+ rank_result = rankdata(-array_means, method="min").astype(
1078
+ np.int32, copy=False
1079
+ )
1080
+ results["rank_%s" % key_name] = rank_result
1081
+
1082
+ _store("fit_time", out["fit_time"])
1083
+ _store("score_time", out["score_time"])
1084
+ # Use one MaskedArray and mask all the places where the param is not
1085
+ # applicable for that candidate. Use defaultdict as each candidate may
1086
+ # not contain all the params
1087
+ param_results = defaultdict(
1088
+ partial(
1089
+ MaskedArray,
1090
+ np.empty(
1091
+ n_candidates,
1092
+ ),
1093
+ mask=True,
1094
+ dtype=object,
1095
+ )
1096
+ )
1097
+ for cand_idx, params in enumerate(candidate_params):
1098
+ for name, value in params.items():
1099
+ # An all masked empty array gets created for the key
1100
+ # `"param_%s" % name` at the first occurrence of `name`.
1101
+ # Setting the value at an index also unmasks that index
1102
+ param_results["param_%s" % name][cand_idx] = value
1103
+
1104
+ results.update(param_results)
1105
+ # Store a list of param dicts at the key 'params'
1106
+ results["params"] = candidate_params
1107
+
1108
+ test_scores_dict = _normalize_score_results(out["test_scores"])
1109
+ if self.return_train_score:
1110
+ train_scores_dict = _normalize_score_results(out["train_scores"])
1111
+
1112
+ for scorer_name in test_scores_dict:
1113
+ # Compute the (weighted) mean and std for the test scores alone
1114
+ _store(
1115
+ "test_%s" % scorer_name,
1116
+ test_scores_dict[scorer_name],
1117
+ splits=True,
1118
+ rank=True,
1119
+ weights=None,
1120
+ )
1121
+ if self.return_train_score:
1122
+ _store(
1123
+ "train_%s" % scorer_name,
1124
+ train_scores_dict[scorer_name],
1125
+ splits=True,
1126
+ )
1127
+
1128
+ return results
1129
+
1130
+ def get_metadata_routing(self):
1131
+ """Get metadata routing of this object.
1132
+
1133
+ Please check :ref:`User Guide <metadata_routing>` on how the routing
1134
+ mechanism works.
1135
+
1136
+ .. versionadded:: 1.4
1137
+
1138
+ Returns
1139
+ -------
1140
+ routing : MetadataRouter
1141
+ A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating
1142
+ routing information.
1143
+ """
1144
+ router = MetadataRouter(owner=self.__class__.__name__)
1145
+ router.add(
1146
+ estimator=self.estimator,
1147
+ method_mapping=MethodMapping().add(caller="fit", callee="fit"),
1148
+ )
1149
+
1150
+ scorer, _ = self._get_scorers(convert_multimetric=True)
1151
+ router.add(
1152
+ scorer=scorer,
1153
+ method_mapping=MethodMapping()
1154
+ .add(caller="score", callee="score")
1155
+ .add(caller="fit", callee="score"),
1156
+ )
1157
+ router.add(
1158
+ splitter=self.cv,
1159
+ method_mapping=MethodMapping().add(caller="fit", callee="split"),
1160
+ )
1161
+ return router
1162
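+ # Editorial sketch: the router above only takes effect once metadata
+ # routing is enabled globally, e.g.:
+ #
+ #     import sklearn
+ #     sklearn.set_config(enable_metadata_routing=True)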
+
1163
+
1164
+ class GridSearchCV(BaseSearchCV):
1165
+ """Exhaustive search over specified parameter values for an estimator.
1166
+
1167
+ Important members are fit, predict.
1168
+
1169
+ GridSearchCV implements a "fit" and a "score" method.
1170
+ It also implements "score_samples", "predict", "predict_proba",
1171
+ "decision_function", "transform" and "inverse_transform" if they are
1172
+ implemented in the estimator used.
1173
+
1174
+ The parameters of the estimator used to apply these methods are optimized
1175
+ by cross-validated grid-search over a parameter grid.
1176
+
1177
+ Read more in the :ref:`User Guide <grid_search>`.
1178
+
1179
+ Parameters
1180
+ ----------
1181
+ estimator : estimator object
1182
+ This is assumed to implement the scikit-learn estimator interface.
1183
+ Either estimator needs to provide a ``score`` function,
1184
+ or ``scoring`` must be passed.
1185
+
1186
+ param_grid : dict or list of dictionaries
1187
+ Dictionary with parameters names (`str`) as keys and lists of
1188
+ parameter settings to try as values, or a list of such
1189
+ dictionaries, in which case the grids spanned by each dictionary
1190
+ in the list are explored. This enables searching over any sequence
1191
+ of parameter settings.
1192
+
1193
+ scoring : str, callable, list, tuple or dict, default=None
1194
+ Strategy to evaluate the performance of the cross-validated model on
1195
+ the test set.
1196
+
1197
+ If `scoring` represents a single score, one can use:
1198
+
1199
+ - a single string (see :ref:`scoring_parameter`);
1200
+ - a callable (see :ref:`scoring`) that returns a single value.
1201
+
1202
+ If `scoring` represents multiple scores, one can use:
1203
+
1204
+ - a list or tuple of unique strings;
1205
+ - a callable returning a dictionary where the keys are the metric
1206
+ names and the values are the metric scores;
1207
+ - a dictionary with metric names as keys and callables as values.
1208
+
1209
+ See :ref:`multimetric_grid_search` for an example.
1210
+
1211
+ n_jobs : int, default=None
1212
+ Number of jobs to run in parallel.
1213
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
1214
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
1215
+ for more details.
1216
+
1217
+ .. versionchanged:: 0.20
1218
+ `n_jobs` default changed from 1 to None
1219
+
1220
+ refit : bool, str, or callable, default=True
1221
+ Refit an estimator using the best found parameters on the whole
1222
+ dataset.
1223
+
1224
+ For multiple metric evaluation, this needs to be a `str` denoting the
1225
+ scorer that would be used to find the best parameters for refitting
1226
+ the estimator at the end.
1227
+
1228
+ Where there are considerations other than maximum score in
1229
+ choosing a best estimator, ``refit`` can be set to a function which
1230
+ returns the selected ``best_index_`` given ``cv_results_``. In that
1231
+ case, the ``best_estimator_`` and ``best_params_`` will be set
1232
+ according to the returned ``best_index_`` while the ``best_score_``
1233
+ attribute will not be available.
1234
+
1235
+ The refitted estimator is made available at the ``best_estimator_``
1236
+ attribute and permits using ``predict`` directly on this
1237
+ ``GridSearchCV`` instance.
1238
+
1239
+ Also for multiple metric evaluation, the attributes ``best_index_``,
1240
+ ``best_score_`` and ``best_params_`` will only be available if
1241
+ ``refit`` is set and all of them will be determined w.r.t this specific
1242
+ scorer.
1243
+
1244
+ See ``scoring`` parameter to know more about multiple metric
1245
+ evaluation.
1246
+
1247
+ See :ref:`sphx_glr_auto_examples_model_selection_plot_grid_search_digits.py`
1248
+ to see how to design a custom selection strategy using a callable
1249
+ via `refit`.
1250
+
1251
+ .. versionchanged:: 0.20
1252
+ Support for callable added.
1253
+
1254
+ cv : int, cross-validation generator or an iterable, default=None
1255
+ Determines the cross-validation splitting strategy.
1256
+ Possible inputs for cv are:
1257
+
1258
+ - None, to use the default 5-fold cross validation,
1259
+ - integer, to specify the number of folds in a `(Stratified)KFold`,
1260
+ - :term:`CV splitter`,
1261
+ - An iterable yielding (train, test) splits as arrays of indices.
1262
+
1263
+ For integer/None inputs, if the estimator is a classifier and ``y`` is
1264
+ either binary or multiclass, :class:`StratifiedKFold` is used. In all
1265
+ other cases, :class:`KFold` is used. These splitters are instantiated
1266
+ with `shuffle=False` so the splits will be the same across calls.
1267
+
1268
+ Refer to the :ref:`User Guide <cross_validation>` for the various
1269
+ cross-validation strategies that can be used here.
1270
+
1271
+ .. versionchanged:: 0.22
1272
+ ``cv`` default value if None changed from 3-fold to 5-fold.
1273
+
1274
+ verbose : int
1275
+ Controls the verbosity: the higher, the more messages.
1276
+
1277
+ - >1 : the computation time for each fold and parameter candidate is
1278
+ displayed;
1279
+ - >2 : the score is also displayed;
1280
+ - >3 : the fold and candidate parameter indexes are also displayed
1281
+ together with the starting time of the computation.
1282
+
1283
+ pre_dispatch : int, or str, default='2*n_jobs'
1284
+ Controls the number of jobs that get dispatched during parallel
1285
+ execution. Reducing this number can be useful to avoid an
1286
+ explosion of memory consumption when more jobs get dispatched
1287
+ than CPUs can process. This parameter can be:
1288
+
1289
+ - None, in which case all the jobs are immediately
1290
+ created and spawned. Use this for lightweight and
1291
+ fast-running jobs, to avoid delays due to on-demand
1292
+ spawning of the jobs
1293
+
1294
+ - An int, giving the exact number of total jobs that are
1295
+ spawned
1296
+
1297
+ - A str, giving an expression as a function of n_jobs,
1298
+ as in '2*n_jobs'
1299
+
1300
+ error_score : 'raise' or numeric, default=np.nan
1301
+ Value to assign to the score if an error occurs in estimator fitting.
1302
+ If set to 'raise', the error is raised. If a numeric value is given,
1303
+ FitFailedWarning is raised. This parameter does not affect the refit
1304
+ step, which will always raise the error.
1305
+
1306
+ return_train_score : bool, default=False
1307
+ If ``False``, the ``cv_results_`` attribute will not include training
1308
+ scores.
1309
+ Computing training scores is used to get insights on how different
1310
+ parameter settings impact the overfitting/underfitting trade-off.
1311
+ However computing the scores on the training set can be computationally
1312
+ expensive and is not strictly required to select the parameters that
1313
+ yield the best generalization performance.
1314
+
1315
+ .. versionadded:: 0.19
1316
+
1317
+ .. versionchanged:: 0.21
1318
+ Default value was changed from ``True`` to ``False``
1319
+
1320
+ Attributes
1321
+ ----------
1322
+ cv_results_ : dict of numpy (masked) ndarrays
1323
+ A dict with keys as column headers and values as columns, that can be
1324
+ imported into a pandas ``DataFrame``.
1325
+
1326
+ For instance, the table below
1327
+
1328
+ +------------+-----------+------------+-----------------+---+---------+
1329
+ |param_kernel|param_gamma|param_degree|split0_test_score|...|rank_t...|
1330
+ +============+===========+============+=================+===+=========+
1331
+ | 'poly' | -- | 2 | 0.80 |...| 2 |
1332
+ +------------+-----------+------------+-----------------+---+---------+
1333
+ | 'poly' | -- | 3 | 0.70 |...| 4 |
1334
+ +------------+-----------+------------+-----------------+---+---------+
1335
+ | 'rbf' | 0.1 | -- | 0.80 |...| 3 |
1336
+ +------------+-----------+------------+-----------------+---+---------+
1337
+ | 'rbf' | 0.2 | -- | 0.93 |...| 1 |
1338
+ +------------+-----------+------------+-----------------+---+---------+
1339
+
1340
+ will be represented by a ``cv_results_`` dict of::
1341
+
1342
+ {
1343
+ 'param_kernel': masked_array(data = ['poly', 'poly', 'rbf', 'rbf'],
1344
+ mask = [False False False False]...)
1345
+ 'param_gamma': masked_array(data = [-- -- 0.1 0.2],
1346
+ mask = [ True True False False]...),
1347
+ 'param_degree': masked_array(data = [2.0 3.0 -- --],
1348
+ mask = [False False True True]...),
1349
+ 'split0_test_score' : [0.80, 0.70, 0.80, 0.93],
1350
+ 'split1_test_score' : [0.82, 0.50, 0.70, 0.78],
1351
+ 'mean_test_score' : [0.81, 0.60, 0.75, 0.85],
1352
+ 'std_test_score' : [0.01, 0.10, 0.05, 0.08],
1353
+ 'rank_test_score' : [2, 4, 3, 1],
1354
+ 'split0_train_score' : [0.80, 0.92, 0.70, 0.93],
1355
+ 'split1_train_score' : [0.82, 0.55, 0.70, 0.87],
1356
+ 'mean_train_score' : [0.81, 0.74, 0.70, 0.90],
1357
+ 'std_train_score' : [0.01, 0.19, 0.00, 0.03],
1358
+ 'mean_fit_time' : [0.73, 0.63, 0.43, 0.49],
1359
+ 'std_fit_time' : [0.01, 0.02, 0.01, 0.01],
1360
+ 'mean_score_time' : [0.01, 0.06, 0.04, 0.04],
1361
+ 'std_score_time' : [0.00, 0.00, 0.00, 0.01],
1362
+ 'params' : [{'kernel': 'poly', 'degree': 2}, ...],
1363
+ }
1364
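+
+ For example, a fitted search's results can be loaded into one row per
+ candidate with (sketch, where ``clf`` is a fitted instance and pandas
+ is installed)::
+
+ import pandas as pd
+ pd.DataFrame(clf.cv_results_)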
+
1365
+ NOTE
1366
+
1367
+ The key ``'params'`` is used to store a list of parameter
1368
+ settings dicts for all the parameter candidates.
1369
+
1370
+ The ``mean_fit_time``, ``std_fit_time``, ``mean_score_time`` and
1371
+ ``std_score_time`` are all in seconds.
1372
+
1373
+ For multi-metric evaluation, the scores for all the scorers are
1374
+ available in the ``cv_results_`` dict at the keys ending with that
1375
+ scorer's name (``'_<scorer_name>'``) instead of ``'_score'`` shown
1376
+ above. ('split0_test_precision', 'mean_train_precision' etc.)
1377
+
1378
+ best_estimator_ : estimator
1379
+ Estimator that was chosen by the search, i.e. estimator
1380
+ which gave highest score (or smallest loss if specified)
1381
+ on the left out data. Not available if ``refit=False``.
1382
+
1383
+ See ``refit`` parameter for more information on allowed values.
1384
+
1385
+ best_score_ : float
1386
+ Mean cross-validated score of the best_estimator.
1387
+
1388
+ For multi-metric evaluation, this is present only if ``refit`` is
1389
+ specified.
1390
+
1391
+ This attribute is not available if ``refit`` is a function.
1392
+
1393
+ best_params_ : dict
1394
+ Parameter setting that gave the best results on the hold out data.
1395
+
1396
+ For multi-metric evaluation, this is present only if ``refit`` is
1397
+ specified.
1398
+
1399
+ best_index_ : int
1400
+ The index (of the ``cv_results_`` arrays) which corresponds to the best
1401
+ candidate parameter setting.
1402
+
1403
+ The dict at ``search.cv_results_['params'][search.best_index_]`` gives
1404
+ the parameter setting for the best model, that gives the highest
1405
+ mean score (``search.best_score_``).
1406
+
1407
+ For multi-metric evaluation, this is present only if ``refit`` is
1408
+ specified.
1409
+
1410
+ scorer_ : function or a dict
1411
+ Scorer function used on the held out data to choose the best
1412
+ parameters for the model.
1413
+
1414
+ For multi-metric evaluation, this attribute holds the validated
1415
+ ``scoring`` dict which maps the scorer key to the scorer callable.
1416
+
1417
+ n_splits_ : int
1418
+ The number of cross-validation splits (folds/iterations).
1419
+
1420
+ refit_time_ : float
1421
+ Seconds used for refitting the best model on the whole dataset.
1422
+
1423
+ This is present only if ``refit`` is not False.
1424
+
1425
+ .. versionadded:: 0.20
1426
+
1427
+ multimetric_ : bool
1428
+ Whether or not the scorers compute several metrics.
1429
+
1430
+ classes_ : ndarray of shape (n_classes,)
1431
+ The classes labels. This is present only if ``refit`` is specified and
1432
+ the underlying estimator is a classifier.
1433
+
1434
+ n_features_in_ : int
1435
+ Number of features seen during :term:`fit`. Only defined if
1436
+ `best_estimator_` is defined (see the documentation for the `refit`
1437
+ parameter for more details) and that `best_estimator_` exposes
1438
+ `n_features_in_` when fit.
1439
+
1440
+ .. versionadded:: 0.24
1441
+
1442
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
1443
+ Names of features seen during :term:`fit`. Only defined if
1444
+ `best_estimator_` is defined (see the documentation for the `refit`
1445
+ parameter for more details) and that `best_estimator_` exposes
1446
+ `feature_names_in_` when fit.
1447
+
1448
+ .. versionadded:: 1.0
1449
+
1450
+ See Also
1451
+ --------
1452
+ ParameterGrid : Generates all the combinations of a hyperparameter grid.
1453
+ train_test_split : Utility function to split the data into a development
1454
+ set usable for fitting a GridSearchCV instance and an evaluation set
1455
+ for its final evaluation.
1456
+ sklearn.metrics.make_scorer : Make a scorer from a performance metric or
1457
+ loss function.
1458
+
1459
+ Notes
1460
+ -----
1461
+ The parameters selected are those that maximize the score of the left out
1462
+ data, unless an explicit score is passed in which case it is used instead.
1463
+
1464
+ If `n_jobs` was set to a value higher than one, the data is copied for each
1465
+ point in the grid (and not `n_jobs` times). This is done for efficiency
1466
+ reasons if individual jobs take very little time, but may raise errors if
1467
+ the dataset is large and not enough memory is available. A workaround in
1468
+ this case is to set `pre_dispatch`. Then, the memory is copied only
1469
+ `pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
1470
+ n_jobs`.
1471
+
1472
+ Examples
1473
+ --------
1474
+ >>> from sklearn import svm, datasets
1475
+ >>> from sklearn.model_selection import GridSearchCV
1476
+ >>> iris = datasets.load_iris()
1477
+ >>> parameters = {'kernel':('linear', 'rbf'), 'C':[1, 10]}
1478
+ >>> svc = svm.SVC()
1479
+ >>> clf = GridSearchCV(svc, parameters)
1480
+ >>> clf.fit(iris.data, iris.target)
1481
+ GridSearchCV(estimator=SVC(),
1482
+ param_grid={'C': [1, 10], 'kernel': ('linear', 'rbf')})
1483
+ >>> sorted(clf.cv_results_.keys())
1484
+ ['mean_fit_time', 'mean_score_time', 'mean_test_score',...
1485
+ 'param_C', 'param_kernel', 'params',...
1486
+ 'rank_test_score', 'split0_test_score',...
1487
+ 'split2_test_score', ...
1488
+ 'std_fit_time', 'std_score_time', 'std_test_score']
1489
+ """
1490
+
1491
+ _required_parameters = ["estimator", "param_grid"]
1492
+
1493
+ _parameter_constraints: dict = {
1494
+ **BaseSearchCV._parameter_constraints,
1495
+ "param_grid": [dict, list],
1496
+ }
1497
+
1498
+ def __init__(
1499
+ self,
1500
+ estimator,
1501
+ param_grid,
1502
+ *,
1503
+ scoring=None,
1504
+ n_jobs=None,
1505
+ refit=True,
1506
+ cv=None,
1507
+ verbose=0,
1508
+ pre_dispatch="2*n_jobs",
1509
+ error_score=np.nan,
1510
+ return_train_score=False,
1511
+ ):
1512
+ super().__init__(
1513
+ estimator=estimator,
1514
+ scoring=scoring,
1515
+ n_jobs=n_jobs,
1516
+ refit=refit,
1517
+ cv=cv,
1518
+ verbose=verbose,
1519
+ pre_dispatch=pre_dispatch,
1520
+ error_score=error_score,
1521
+ return_train_score=return_train_score,
1522
+ )
1523
+ self.param_grid = param_grid
1524
+
1525
+ def _run_search(self, evaluate_candidates):
1526
+ """Search all candidates in param_grid"""
1527
+ evaluate_candidates(ParameterGrid(self.param_grid))
1528
+
1529
+
1530
+ class RandomizedSearchCV(BaseSearchCV):
1531
+ """Randomized search on hyper parameters.
1532
+
1533
+ RandomizedSearchCV implements a "fit" and a "score" method.
1534
+ It also implements "score_samples", "predict", "predict_proba",
1535
+ "decision_function", "transform" and "inverse_transform" if they are
1536
+ implemented in the estimator used.
1537
+
1538
+ The parameters of the estimator used to apply these methods are optimized
1539
+ by cross-validated search over parameter settings.
1540
+
1541
+ In contrast to GridSearchCV, not all parameter values are tried out, but
1542
+ rather a fixed number of parameter settings is sampled from the specified
1543
+ distributions. The number of parameter settings that are tried is
1544
+ given by n_iter.
1545
+
1546
+ If all parameters are presented as a list,
1547
+ sampling without replacement is performed. If at least one parameter
1548
+ is given as a distribution, sampling with replacement is used.
1549
+ It is highly recommended to use continuous distributions for continuous
1550
+ parameters.
1551
+
1552
+ Read more in the :ref:`User Guide <randomized_parameter_search>`.
1553
+
1554
+ .. versionadded:: 0.14
1555
+
1556
+ Parameters
1557
+ ----------
1558
+ estimator : estimator object
1559
+ An object of that type is instantiated for each grid point.
1560
+ This is assumed to implement the scikit-learn estimator interface.
1561
+ Either estimator needs to provide a ``score`` function,
1562
+ or ``scoring`` must be passed.
1563
+
1564
+ param_distributions : dict or list of dicts
1565
+ Dictionary with parameters names (`str`) as keys and distributions
1566
+ or lists of parameters to try. Distributions must provide a ``rvs``
1567
+ method for sampling (such as those from scipy.stats.distributions).
1568
+ If a list is given, it is sampled uniformly.
1569
+ If a list of dicts is given, first a dict is sampled uniformly, and
1570
+ then a parameter is sampled using that dict as above.
1571
+
1572
+ n_iter : int, default=10
1573
+ Number of parameter settings that are sampled. n_iter trades
1574
+ off runtime vs quality of the solution.
1575
+
1576
+ scoring : str, callable, list, tuple or dict, default=None
1577
+ Strategy to evaluate the performance of the cross-validated model on
1578
+ the test set.
1579
+
1580
+ If `scoring` represents a single score, one can use:
1581
+
1582
+ - a single string (see :ref:`scoring_parameter`);
1583
+ - a callable (see :ref:`scoring`) that returns a single value.
1584
+
1585
+ If `scoring` represents multiple scores, one can use:
1586
+
1587
+ - a list or tuple of unique strings;
1588
+ - a callable returning a dictionary where the keys are the metric
1589
+ names and the values are the metric scores;
1590
+ - a dictionary with metric names as keys and callables as values.
1591
+
1592
+ See :ref:`multimetric_grid_search` for an example.
1593
+
1594
+ If None, the estimator's score method is used.
1595
+
1596
+ n_jobs : int, default=None
1597
+ Number of jobs to run in parallel.
1598
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
1599
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
1600
+ for more details.
1601
+
1602
+ .. versionchanged:: 0.20
1603
+ `n_jobs` default changed from 1 to None
1604
+
1605
+ refit : bool, str, or callable, default=True
1606
+ Refit an estimator using the best found parameters on the whole
1607
+ dataset.
1608
+
1609
+ For multiple metric evaluation, this needs to be a `str` denoting the
1610
+ scorer that would be used to find the best parameters for refitting
1611
+ the estimator at the end.
1612
+
1613
+ Where there are considerations other than maximum score in
1614
+ choosing a best estimator, ``refit`` can be set to a function which
1615
+ returns the selected ``best_index_`` given the ``cv_results``. In that
1616
+ case, the ``best_estimator_`` and ``best_params_`` will be set
1617
+ according to the returned ``best_index_`` while the ``best_score_``
1618
+ attribute will not be available.
1619
+
1620
+ The refitted estimator is made available at the ``best_estimator_``
1621
+ attribute and permits using ``predict`` directly on this
1622
+ ``RandomizedSearchCV`` instance.
1623
+
1624
+ Also for multiple metric evaluation, the attributes ``best_index_``,
1625
+ ``best_score_`` and ``best_params_`` will only be available if
1626
+ ``refit`` is set and all of them will be determined w.r.t this specific
1627
+ scorer.
1628
+
1629
+ See ``scoring`` parameter to know more about multiple metric
1630
+ evaluation.
1631
+
1632
+ .. versionchanged:: 0.20
1633
+ Support for callable added.
1634
+
1635
+ cv : int, cross-validation generator or an iterable, default=None
1636
+ Determines the cross-validation splitting strategy.
1637
+ Possible inputs for cv are:
1638
+
1639
+ - None, to use the default 5-fold cross validation,
1640
+ - integer, to specify the number of folds in a `(Stratified)KFold`,
1641
+ - :term:`CV splitter`,
1642
+ - An iterable yielding (train, test) splits as arrays of indices.
1643
+
1644
+ For integer/None inputs, if the estimator is a classifier and ``y`` is
1645
+ either binary or multiclass, :class:`StratifiedKFold` is used. In all
1646
+ other cases, :class:`KFold` is used. These splitters are instantiated
1647
+ with `shuffle=False` so the splits will be the same across calls.
1648
+
1649
+ Refer to the :ref:`User Guide <cross_validation>` for the various
1650
+ cross-validation strategies that can be used here.
1651
+
1652
+ .. versionchanged:: 0.22
1653
+ ``cv`` default value if None changed from 3-fold to 5-fold.
1654
+
1655
+ verbose : int
1656
+ Controls the verbosity: the higher, the more messages.
1657
+
1658
+ - >1 : the computation time for each fold and parameter candidate is
1659
+ displayed;
1660
+ - >2 : the score is also displayed;
1661
+ - >3 : the fold and candidate parameter indexes are also displayed
1662
+ together with the starting time of the computation.
1663
+
1664
+ pre_dispatch : int, or str, default='2*n_jobs'
1665
+ Controls the number of jobs that get dispatched during parallel
1666
+ execution. Reducing this number can be useful to avoid an
1667
+ explosion of memory consumption when more jobs get dispatched
1668
+ than CPUs can process. This parameter can be:
1669
+
1670
+ - None, in which case all the jobs are immediately
1671
+ created and spawned. Use this for lightweight and
1672
+ fast-running jobs, to avoid delays due to on-demand
1673
+ spawning of the jobs
1674
+
1675
+ - An int, giving the exact number of total jobs that are
1676
+ spawned
1677
+
1678
+ - A str, giving an expression as a function of n_jobs,
1679
+ as in '2*n_jobs'
1680
+
1681
+ random_state : int, RandomState instance or None, default=None
1682
+ Pseudo random number generator state used for random uniform sampling
1683
+ from lists of possible values instead of scipy.stats distributions.
1684
+ Pass an int for reproducible output across multiple
1685
+ function calls.
1686
+ See :term:`Glossary <random_state>`.
1687
+
1688
+ error_score : 'raise' or numeric, default=np.nan
1689
+ Value to assign to the score if an error occurs in estimator fitting.
1690
+ If set to 'raise', the error is raised. If a numeric value is given,
1691
+ FitFailedWarning is raised. This parameter does not affect the refit
1692
+ step, which will always raise the error.
1693
+
1694
+ return_train_score : bool, default=False
1695
+ If ``False``, the ``cv_results_`` attribute will not include training
1696
+ scores.
1697
+ Computing training scores is used to get insights on how different
1698
+ parameter settings impact the overfitting/underfitting trade-off.
1699
+ However computing the scores on the training set can be computationally
1700
+ expensive and is not strictly required to select the parameters that
1701
+ yield the best generalization performance.
1702
+
1703
+ .. versionadded:: 0.19
1704
+
1705
+ .. versionchanged:: 0.21
1706
+ Default value was changed from ``True`` to ``False``
1707
+
1708
+ Attributes
1709
+ ----------
1710
+ cv_results_ : dict of numpy (masked) ndarrays
1711
+ A dict with keys as column headers and values as columns, that can be
1712
+ imported into a pandas ``DataFrame``.
1713
+
1714
+ For instance, the table below
1715
+
1716
+ +--------------+-------------+-------------------+---+---------------+
1717
+ | param_kernel | param_gamma | split0_test_score |...|rank_test_score|
1718
+ +==============+=============+===================+===+===============+
1719
+ | 'rbf' | 0.1 | 0.80 |...| 1 |
1720
+ +--------------+-------------+-------------------+---+---------------+
1721
+ | 'rbf' | 0.2 | 0.84 |...| 3 |
1722
+ +--------------+-------------+-------------------+---+---------------+
1723
+ | 'rbf' | 0.3 | 0.70 |...| 2 |
1724
+ +--------------+-------------+-------------------+---+---------------+
1725
+
1726
+ will be represented by a ``cv_results_`` dict of::
1727
+
1728
+ {
1729
+ 'param_kernel' : masked_array(data = ['rbf', 'rbf', 'rbf'],
1730
+ mask = False),
1731
+ 'param_gamma' : masked_array(data = [0.1 0.2 0.3], mask = False),
1732
+ 'split0_test_score' : [0.80, 0.84, 0.70],
1733
+ 'split1_test_score' : [0.82, 0.50, 0.70],
1734
+ 'mean_test_score' : [0.81, 0.67, 0.70],
1735
+ 'std_test_score' : [0.01, 0.24, 0.00],
1736
+ 'rank_test_score' : [1, 3, 2],
1737
+ 'split0_train_score' : [0.80, 0.92, 0.70],
1738
+ 'split1_train_score' : [0.82, 0.55, 0.70],
1739
+ 'mean_train_score' : [0.81, 0.74, 0.70],
1740
+ 'std_train_score' : [0.01, 0.19, 0.00],
1741
+ 'mean_fit_time' : [0.73, 0.63, 0.43],
1742
+ 'std_fit_time' : [0.01, 0.02, 0.01],
1743
+ 'mean_score_time' : [0.01, 0.06, 0.04],
1744
+ 'std_score_time' : [0.00, 0.00, 0.00],
1745
+ 'params' : [{'kernel' : 'rbf', 'gamma' : 0.1}, ...],
1746
+ }
1747
+
1748
+ NOTE
1749
+
1750
+ The key ``'params'`` is used to store a list of parameter
1751
+ settings dicts for all the parameter candidates.
1752
+
1753
+ The ``mean_fit_time``, ``std_fit_time``, ``mean_score_time`` and
1754
+ ``std_score_time`` are all in seconds.
1755
+
1756
+ For multi-metric evaluation, the scores for all the scorers are
1757
+ available in the ``cv_results_`` dict at the keys ending with that
1758
+ scorer's name (``'_<scorer_name>'``) instead of ``'_score'`` shown
1759
+ above. ('split0_test_precision', 'mean_train_precision' etc.)
1760
+
1761
+ best_estimator_ : estimator
1762
+ Estimator that was chosen by the search, i.e. estimator
1763
+ which gave highest score (or smallest loss if specified)
1764
+ on the left out data. Not available if ``refit=False``.
1765
+
1766
+ For multi-metric evaluation, this attribute is present only if
1767
+ ``refit`` is specified.
1768
+
1769
+ See ``refit`` parameter for more information on allowed values.
1770
+
1771
+ best_score_ : float
1772
+ Mean cross-validated score of the best_estimator.
1773
+
1774
+ For multi-metric evaluation, this is not available if ``refit`` is
1775
+ ``False``. See ``refit`` parameter for more information.
1776
+
1777
+ This attribute is not available if ``refit`` is a function.
1778
+
1779
+ best_params_ : dict
1780
+ Parameter setting that gave the best results on the hold out data.
1781
+
1782
+ For multi-metric evaluation, this is not available if ``refit`` is
1783
+ ``False``. See ``refit`` parameter for more information.
1784
+
1785
+ best_index_ : int
1786
+ The index (of the ``cv_results_`` arrays) which corresponds to the best
1787
+ candidate parameter setting.
1788
+
1789
+ The dict at ``search.cv_results_['params'][search.best_index_]`` gives
1790
+ the parameter setting for the best model, that gives the highest
1791
+ mean score (``search.best_score_``).
1792
+
1793
+ For multi-metric evaluation, this is not available if ``refit`` is
1794
+ ``False``. See ``refit`` parameter for more information.
1795
+
1796
+ scorer_ : function or a dict
1797
+ Scorer function used on the held out data to choose the best
1798
+ parameters for the model.
1799
+
1800
+ For multi-metric evaluation, this attribute holds the validated
1801
+ ``scoring`` dict which maps the scorer key to the scorer callable.
1802
+
1803
+ n_splits_ : int
1804
+ The number of cross-validation splits (folds/iterations).
1805
+
1806
+ refit_time_ : float
1807
+ Seconds used for refitting the best model on the whole dataset.
1808
+
1809
+ This is present only if ``refit`` is not False.
1810
+
1811
+ .. versionadded:: 0.20
1812
+
1813
+ multimetric_ : bool
1814
+ Whether or not the scorers compute several metrics.
1815
+
1816
+ classes_ : ndarray of shape (n_classes,)
1817
+ The classes labels. This is present only if ``refit`` is specified and
1818
+ the underlying estimator is a classifier.
1819
+
1820
+ n_features_in_ : int
1821
+ Number of features seen during :term:`fit`. Only defined if
1822
+ `best_estimator_` is defined (see the documentation for the `refit`
1823
+ parameter for more details) and that `best_estimator_` exposes
1824
+ `n_features_in_` when fit.
1825
+
1826
+ .. versionadded:: 0.24
1827
+
1828
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
1829
+ Names of features seen during :term:`fit`. Only defined if
1830
+ `best_estimator_` is defined (see the documentation for the `refit`
1831
+ parameter for more details) and that `best_estimator_` exposes
1832
+ `feature_names_in_` when fit.
1833
+
1834
+ .. versionadded:: 1.0
1835
+
1836
+ See Also
1837
+ --------
1838
+ GridSearchCV : Does exhaustive search over a grid of parameters.
1839
+ ParameterSampler : A generator over parameter settings, constructed from
1840
+ param_distributions.
1841
+
1842
+ Notes
1843
+ -----
1844
+ The parameters selected are those that maximize the score of the held-out
1845
+ data, according to the scoring parameter.
1846
+
1847
+ If `n_jobs` was set to a value higher than one, the data is copied for each
1848
+ parameter setting(and not `n_jobs` times). This is done for efficiency
1849
+ reasons if individual jobs take very little time, but may raise errors if
1850
+ the dataset is large and not enough memory is available. A workaround in
1851
+ this case is to set `pre_dispatch`. Then, the memory is copied only
1852
+ `pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
1853
+ n_jobs`.
1854
+
1855
+ Examples
1856
+ --------
1857
+ >>> from sklearn.datasets import load_iris
1858
+ >>> from sklearn.linear_model import LogisticRegression
1859
+ >>> from sklearn.model_selection import RandomizedSearchCV
1860
+ >>> from scipy.stats import uniform
1861
+ >>> iris = load_iris()
1862
+ >>> logistic = LogisticRegression(solver='saga', tol=1e-2, max_iter=200,
1863
+ ... random_state=0)
1864
+ >>> distributions = dict(C=uniform(loc=0, scale=4),
1865
+ ... penalty=['l2', 'l1'])
1866
+ >>> clf = RandomizedSearchCV(logistic, distributions, random_state=0)
1867
+ >>> search = clf.fit(iris.data, iris.target)
1868
+ >>> search.best_params_
1869
+ {'C': 2..., 'penalty': 'l1'}
1870
+ """
1871
+
1872
+ _required_parameters = ["estimator", "param_distributions"]
1873
+
1874
+ _parameter_constraints: dict = {
1875
+ **BaseSearchCV._parameter_constraints,
1876
+ "param_distributions": [dict, list],
1877
+ "n_iter": [Interval(numbers.Integral, 1, None, closed="left")],
1878
+ "random_state": ["random_state"],
1879
+ }
1880
+
1881
+ def __init__(
1882
+ self,
1883
+ estimator,
1884
+ param_distributions,
1885
+ *,
1886
+ n_iter=10,
1887
+ scoring=None,
1888
+ n_jobs=None,
1889
+ refit=True,
1890
+ cv=None,
1891
+ verbose=0,
1892
+ pre_dispatch="2*n_jobs",
1893
+ random_state=None,
1894
+ error_score=np.nan,
1895
+ return_train_score=False,
1896
+ ):
1897
+ self.param_distributions = param_distributions
1898
+ self.n_iter = n_iter
1899
+ self.random_state = random_state
1900
+ super().__init__(
1901
+ estimator=estimator,
1902
+ scoring=scoring,
1903
+ n_jobs=n_jobs,
1904
+ refit=refit,
1905
+ cv=cv,
1906
+ verbose=verbose,
1907
+ pre_dispatch=pre_dispatch,
1908
+ error_score=error_score,
1909
+ return_train_score=return_train_score,
1910
+ )
1911
+
1912
+ def _run_search(self, evaluate_candidates):
1913
+ """Search n_iter candidates from param_distributions"""
1914
+ evaluate_candidates(
1915
+ ParameterSampler(
1916
+ self.param_distributions, self.n_iter, random_state=self.random_state
1917
+ )
1918
+ )
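
# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the upstream file: ``cv_results_`` is
# designed to load straight into a pandas DataFrame, and ``best_index_``
# indexes into it, tying together the attributes documented above. Assumes
# pandas is installed.
import pandas as pd
from scipy.stats import uniform
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import RandomizedSearchCV

X, y = load_iris(return_X_y=True)
search = RandomizedSearchCV(
    LogisticRegression(solver="saga", tol=1e-2, max_iter=200, random_state=0),
    {"C": uniform(loc=0, scale=4), "penalty": ["l2", "l1"]},
    n_iter=5,
    random_state=0,
).fit(X, y)
results = pd.DataFrame(search.cv_results_)
best_row = results.loc[search.best_index_]  # row of the winning candidate
assert best_row["params"] == search.best_params_
assert best_row["mean_test_score"] == search.best_score_
# ---------------------------------------------------------------------------
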
env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/_split.py ADDED
The diff for this file is too large to render. See raw diff
 
env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/_validation.py ADDED
@@ -0,0 +1,2360 @@
"""
The :mod:`sklearn.model_selection._validation` module includes classes and
functions to validate the model.
"""

# Author: Alexandre Gramfort <[email protected]>
#         Gael Varoquaux <[email protected]>
#         Olivier Grisel <[email protected]>
#         Raghav RV <[email protected]>
#         Michal Karbownik <[email protected]>
# License: BSD 3 clause


import numbers
import time
import warnings
from collections import Counter
from contextlib import suppress
from functools import partial
from numbers import Real
from traceback import format_exc

import numpy as np
import scipy.sparse as sp
from joblib import logger

from ..base import clone, is_classifier
from ..exceptions import FitFailedWarning, UnsetMetadataPassedError
from ..metrics import check_scoring, get_scorer_names
from ..metrics._scorer import _check_multimetric_scoring, _MultimetricScorer
from ..preprocessing import LabelEncoder
from ..utils import Bunch, _safe_indexing, check_random_state, indexable
from ..utils._param_validation import (
    HasMethods,
    Integral,
    Interval,
    StrOptions,
    validate_params,
)
from ..utils.metadata_routing import (
    MetadataRouter,
    MethodMapping,
    _routing_enabled,
    process_routing,
)
from ..utils.metaestimators import _safe_split
from ..utils.parallel import Parallel, delayed
from ..utils.validation import _check_method_params, _num_samples
from ._split import check_cv

__all__ = [
    "cross_validate",
    "cross_val_score",
    "cross_val_predict",
    "permutation_test_score",
    "learning_curve",
    "validation_curve",
]


def _check_params_groups_deprecation(fit_params, params, groups):
    """A helper function to check deprecations on `groups` and `fit_params`.

    To be removed when set_config(enable_metadata_routing=False) is not possible.
    """
    if params is not None and fit_params is not None:
        raise ValueError(
            "`params` and `fit_params` cannot both be provided. Pass parameters "
            "via `params`. `fit_params` is deprecated and will be removed in "
            "version 1.6."
        )
    elif fit_params is not None:
        warnings.warn(
            (
                "`fit_params` is deprecated and will be removed in version 1.6. "
                "Pass parameters via `params` instead."
            ),
            FutureWarning,
        )
        params = fit_params

    params = {} if params is None else params

    if groups is not None and _routing_enabled():
        raise ValueError(
            "`groups` can only be passed if metadata routing is not enabled via"
            " `sklearn.set_config(enable_metadata_routing=True)`. When routing is"
            " enabled, pass `groups` alongside other metadata via the `params` argument"
            " instead."
        )

    return params

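# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the upstream file: exercising the helper
# above through the public API. With routing disabled (the default), passing
# the deprecated ``fit_params`` emits a FutureWarning, while the same dict
# passed via ``params`` does not.
import numpy as np
from sklearn.linear_model import Ridge
from sklearn.model_selection import cross_validate

X = np.random.RandomState(0).rand(20, 3)
y = np.arange(20.0)
w = np.ones(20)
cross_validate(Ridge(), X, y, cv=3, params={"sample_weight": w})  # no warning
cross_validate(Ridge(), X, y, cv=3, fit_params={"sample_weight": w})  # FutureWarning
# ---------------------------------------------------------------------------
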

@validate_params(
    {
        "estimator": [HasMethods("fit")],
        "X": ["array-like", "sparse matrix"],
        "y": ["array-like", None],
        "groups": ["array-like", None],
        "scoring": [
            StrOptions(set(get_scorer_names())),
            callable,
            list,
            tuple,
            dict,
            None,
        ],
        "cv": ["cv_object"],
        "n_jobs": [Integral, None],
        "verbose": ["verbose"],
        "fit_params": [dict, None],
        "params": [dict, None],
        "pre_dispatch": [Integral, str],
        "return_train_score": ["boolean"],
        "return_estimator": ["boolean"],
        "return_indices": ["boolean"],
        "error_score": [StrOptions({"raise"}), Real],
    },
    prefer_skip_nested_validation=False,  # estimator is not validated yet
)
def cross_validate(
    estimator,
    X,
    y=None,
    *,
    groups=None,
    scoring=None,
    cv=None,
    n_jobs=None,
    verbose=0,
    fit_params=None,
    params=None,
    pre_dispatch="2*n_jobs",
    return_train_score=False,
    return_estimator=False,
    return_indices=False,
    error_score=np.nan,
):
    """Evaluate metric(s) by cross-validation and also record fit/score times.

    Read more in the :ref:`User Guide <multimetric_cross_validation>`.

    Parameters
    ----------
    estimator : estimator object implementing 'fit'
        The object to use to fit the data.

    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        The data to fit. Can be, for example, a list or an array.

    y : array-like of shape (n_samples,) or (n_samples, n_outputs), default=None
        The target variable to try to predict in the case of
        supervised learning.

    groups : array-like of shape (n_samples,), default=None
        Group labels for the samples used while splitting the dataset into
        train/test set. Only used in conjunction with a "Group" :term:`cv`
        instance (e.g., :class:`GroupKFold`).

        .. versionchanged:: 1.4
            ``groups`` can only be passed if metadata routing is not enabled
            via ``sklearn.set_config(enable_metadata_routing=True)``. When routing
            is enabled, pass ``groups`` alongside other metadata via the ``params``
            argument instead. E.g.:
            ``cross_validate(..., params={'groups': groups})``.

    scoring : str, callable, list, tuple, or dict, default=None
        Strategy to evaluate the performance of the cross-validated model on
        the test set.

        If `scoring` represents a single score, one can use:

        - a single string (see :ref:`scoring_parameter`);
        - a callable (see :ref:`scoring`) that returns a single value.

        If `scoring` represents multiple scores, one can use:

        - a list or tuple of unique strings;
        - a callable returning a dictionary where the keys are the metric
          names and the values are the metric scores;
        - a dictionary with metric names as keys and callables as values.

        See :ref:`multimetric_grid_search` for an example.

    cv : int, cross-validation generator or an iterable, default=None
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:

        - None, to use the default 5-fold cross validation,
        - int, to specify the number of folds in a `(Stratified)KFold`,
        - :term:`CV splitter`,
        - An iterable yielding (train, test) splits as arrays of indices.

        For int/None inputs, if the estimator is a classifier and ``y`` is
        either binary or multiclass, :class:`StratifiedKFold` is used. In all
        other cases, :class:`KFold` is used. These splitters are instantiated
        with `shuffle=False` so the splits will be the same across calls.

        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.

        .. versionchanged:: 0.22
            ``cv`` default value if None changed from 3-fold to 5-fold.

    n_jobs : int, default=None
        Number of jobs to run in parallel. Training the estimator and computing
        the score are parallelized over the cross-validation splits.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.

    verbose : int, default=0
        The verbosity level.

    fit_params : dict, default=None
        Parameters to pass to the fit method of the estimator.

        .. deprecated:: 1.4
            This parameter is deprecated and will be removed in version 1.6. Use
            ``params`` instead.

    params : dict, default=None
        Parameters to pass to the underlying estimator's ``fit``, the scorer,
        and the CV splitter.

        .. versionadded:: 1.4

    pre_dispatch : int or str, default='2*n_jobs'
        Controls the number of jobs that get dispatched during parallel
        execution. Reducing this number can be useful to avoid an
        explosion of memory consumption when more jobs get dispatched
        than CPUs can process. This parameter can be:

            - An int, giving the exact number of total jobs that are
              spawned

            - A str, giving an expression as a function of n_jobs,
              as in '2*n_jobs'

    return_train_score : bool, default=False
        Whether to include train scores.
        Computing training scores is used to get insights on how different
        parameter settings impact the overfitting/underfitting trade-off.
        However, computing the scores on the training set can be computationally
        expensive and is not strictly required to select the parameters that
        yield the best generalization performance.

        .. versionadded:: 0.19

        .. versionchanged:: 0.21
            Default value was changed from ``True`` to ``False``

    return_estimator : bool, default=False
        Whether to return the estimators fitted on each split.

        .. versionadded:: 0.20

    return_indices : bool, default=False
        Whether to return the train-test indices selected for each split.

        .. versionadded:: 1.3

    error_score : 'raise' or numeric, default=np.nan
        Value to assign to the score if an error occurs in estimator fitting.
        If set to 'raise', the error is raised.
        If a numeric value is given, FitFailedWarning is raised.

        .. versionadded:: 0.20

    Returns
    -------
    scores : dict of float arrays of shape (n_splits,)
        Array of scores of the estimator for each run of the cross validation.

        A dict of arrays containing the score/time arrays for each scorer is
        returned. The possible keys for this ``dict`` are:

        ``test_score``
            The score array for test scores on each cv split.
            Suffix ``_score`` in ``test_score`` changes to a specific
            metric like ``test_r2`` or ``test_auc`` if there are
            multiple scoring metrics in the scoring parameter.
        ``train_score``
            The score array for train scores on each cv split.
            Suffix ``_score`` in ``train_score`` changes to a specific
            metric like ``train_r2`` or ``train_auc`` if there are
            multiple scoring metrics in the scoring parameter.
            This is available only if ``return_train_score`` parameter
            is ``True``.
        ``fit_time``
            The time for fitting the estimator on the train
            set for each cv split.
        ``score_time``
            The time for scoring the estimator on the test set for each
            cv split. (Note: time for scoring on the train set is not
            included even if ``return_train_score`` is set to ``True``.)
        ``estimator``
            The estimator objects for each cv split.
            This is available only if ``return_estimator`` parameter
            is set to ``True``.
        ``indices``
            The train/test positional indices for each cv split. A dictionary
            is returned where the keys are either `"train"` or `"test"`
            and the associated values are a list of integer-dtyped NumPy
            arrays with the indices. Available only if `return_indices=True`.

    See Also
    --------
    cross_val_score : Run cross-validation for single metric evaluation.

    cross_val_predict : Get predictions from each split of cross-validation for
        diagnostic purposes.

    sklearn.metrics.make_scorer : Make a scorer from a performance metric or
        loss function.

    Examples
    --------
    >>> from sklearn import datasets, linear_model
    >>> from sklearn.model_selection import cross_validate
    >>> from sklearn.metrics import make_scorer
    >>> from sklearn.metrics import confusion_matrix
    >>> from sklearn.svm import LinearSVC
    >>> diabetes = datasets.load_diabetes()
    >>> X = diabetes.data[:150]
    >>> y = diabetes.target[:150]
    >>> lasso = linear_model.Lasso()

    Single metric evaluation using ``cross_validate``

    >>> cv_results = cross_validate(lasso, X, y, cv=3)
    >>> sorted(cv_results.keys())
    ['fit_time', 'score_time', 'test_score']
    >>> cv_results['test_score']
    array([0.3315057 , 0.08022103, 0.03531816])

    Multiple metric evaluation using ``cross_validate``
    (please refer the ``scoring`` parameter doc for more information)

    >>> scores = cross_validate(lasso, X, y, cv=3,
    ...                         scoring=('r2', 'neg_mean_squared_error'),
    ...                         return_train_score=True)
    >>> print(scores['test_neg_mean_squared_error'])
    [-3635.5... -3573.3... -6114.7...]
    >>> print(scores['train_r2'])
    [0.28009951 0.3908844  0.22784907]
    """
    params = _check_params_groups_deprecation(fit_params, params, groups)

    X, y = indexable(X, y)

    cv = check_cv(cv, y, classifier=is_classifier(estimator))

    if callable(scoring):
        scorers = scoring
    elif scoring is None or isinstance(scoring, str):
        scorers = check_scoring(estimator, scoring)
    else:
        scorers = _check_multimetric_scoring(estimator, scoring)

    if _routing_enabled():
        # `cross_validate` will create a `_MultiMetricScorer` if `scoring` is a
        # dict at a later stage. We need the same object for the purpose of
        # routing. However, creating it here and passing it around would create
        # a much larger diff since the dict is used in many places.
        if isinstance(scorers, dict):
            _scorer = _MultimetricScorer(
                scorers=scorers, raise_exc=(error_score == "raise")
            )
        else:
            _scorer = scorers
        # For estimators, a MetadataRouter is created in get_metadata_routing
        # methods. For these router methods, we create the router to use
        # `process_routing` on it.
        router = (
            MetadataRouter(owner="cross_validate")
            .add(
                splitter=cv,
                method_mapping=MethodMapping().add(caller="fit", callee="split"),
            )
            .add(
                estimator=estimator,
                # TODO(SLEP6): also pass metadata to the predict method for
                # scoring?
                method_mapping=MethodMapping().add(caller="fit", callee="fit"),
            )
            .add(
                scorer=_scorer,
                method_mapping=MethodMapping().add(caller="fit", callee="score"),
            )
        )
        try:
            routed_params = process_routing(router, "fit", **params)
        except UnsetMetadataPassedError as e:
            # The default exception would mention `fit` since in the above
            # `process_routing` code, we pass `fit` as the caller. However,
            # the user is not calling `fit` directly, so we change the message
            # to make it more suitable for this case.
            unrequested_params = sorted(e.unrequested_params)
            raise UnsetMetadataPassedError(
                message=(
                    f"{unrequested_params} are passed to cross validation but are not"
                    " explicitly set as requested or not requested for cross_validate's"
                    f" estimator: {estimator.__class__.__name__}. Call"
                    " `.set_fit_request({{metadata}}=True)` on the estimator for"
                    f" each metadata in {unrequested_params} that you"
                    " want to use and `metadata=False` for not using it. See the"
                    " Metadata Routing User guide"
                    " <https://scikit-learn.org/stable/metadata_routing.html> for more"
                    " information."
                ),
                unrequested_params=e.unrequested_params,
                routed_params=e.routed_params,
            )
    else:
        routed_params = Bunch()
        routed_params.splitter = Bunch(split={"groups": groups})
        routed_params.estimator = Bunch(fit=params)
        routed_params.scorer = Bunch(score={})

    indices = cv.split(X, y, **routed_params.splitter.split)
    if return_indices:
        # materialize the indices since we need to store them in the returned dict
        indices = list(indices)

    # We clone the estimator to make sure that all the folds are
    # independent, and that it is pickle-able.
    parallel = Parallel(n_jobs=n_jobs, verbose=verbose, pre_dispatch=pre_dispatch)
    results = parallel(
        delayed(_fit_and_score)(
            clone(estimator),
            X,
            y,
            scorer=scorers,
            train=train,
            test=test,
            verbose=verbose,
            parameters=None,
            fit_params=routed_params.estimator.fit,
            score_params=routed_params.scorer.score,
            return_train_score=return_train_score,
            return_times=True,
            return_estimator=return_estimator,
            error_score=error_score,
        )
        for train, test in indices
    )

    _warn_or_raise_about_fit_failures(results, error_score)

    # For callable scoring, the return type is only known after calling. If the
    # return type is a dictionary, the error scores can now be inserted with
    # the correct key.
    if callable(scoring):
        _insert_error_scores(results, error_score)

    results = _aggregate_score_dicts(results)

    ret = {}
    ret["fit_time"] = results["fit_time"]
    ret["score_time"] = results["score_time"]

    if return_estimator:
        ret["estimator"] = results["estimator"]

    if return_indices:
        ret["indices"] = {}
        ret["indices"]["train"], ret["indices"]["test"] = zip(*indices)

    test_scores_dict = _normalize_score_results(results["test_scores"])
    if return_train_score:
        train_scores_dict = _normalize_score_results(results["train_scores"])

    for name in test_scores_dict:
        ret["test_%s" % name] = test_scores_dict[name]
        if return_train_score:
            key = "train_%s" % name
            ret[key] = train_scores_dict[name]

    return ret

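# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the upstream file: the optional return
# values documented above. ``return_indices=True`` (added in 1.3) exposes the
# exact train/test split behind each fitted estimator.
from sklearn.datasets import load_diabetes
from sklearn.linear_model import Lasso
from sklearn.model_selection import cross_validate

X, y = load_diabetes(return_X_y=True)
res = cross_validate(
    Lasso(), X, y, cv=3, return_estimator=True, return_indices=True
)
fold_0_model = res["estimator"][0]    # Lasso fitted on fold 0's training set
train_0 = res["indices"]["train"][0]  # integer indices of that training set
test_0 = res["indices"]["test"][0]
assert len(train_0) + len(test_0) == len(X)
# ---------------------------------------------------------------------------
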

def _insert_error_scores(results, error_score):
    """Insert error scores in `results` by replacing them inplace with `error_score`.

    This only applies to multimetric scores because `_fit_and_score` will
    handle the single metric case.
    """
    successful_score = None
    failed_indices = []
    for i, result in enumerate(results):
        if result["fit_error"] is not None:
            failed_indices.append(i)
        elif successful_score is None:
            successful_score = result["test_scores"]

    if isinstance(successful_score, dict):
        formatted_error = {name: error_score for name in successful_score}
        for i in failed_indices:
            results[i]["test_scores"] = formatted_error.copy()
            if "train_scores" in results[i]:
                results[i]["train_scores"] = formatted_error.copy()


def _normalize_score_results(scores, scaler_score_key="score"):
    """Creates a scoring dictionary based on the type of `scores`."""
    if isinstance(scores[0], dict):
        # multimetric scoring
        return _aggregate_score_dicts(scores)
    # scalar score
    return {scaler_score_key: scores}

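# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the upstream file: the two shapes
# _normalize_score_results handles, single-metric scalar lists versus
# per-split multimetric dicts (private API, shown only for concreteness).
from sklearn.model_selection._validation import _normalize_score_results

print(_normalize_score_results([0.8, 0.9]))
# {'score': [0.8, 0.9]}
print(_normalize_score_results([{"r2": 0.8}, {"r2": 0.9}]))
# {'r2': array([0.8, 0.9])}
# ---------------------------------------------------------------------------
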

def _warn_or_raise_about_fit_failures(results, error_score):
    fit_errors = [
        result["fit_error"] for result in results if result["fit_error"] is not None
    ]
    if fit_errors:
        num_failed_fits = len(fit_errors)
        num_fits = len(results)
        fit_errors_counter = Counter(fit_errors)
        delimiter = "-" * 80 + "\n"
        fit_errors_summary = "\n".join(
            f"{delimiter}{n} fits failed with the following error:\n{error}"
            for error, n in fit_errors_counter.items()
        )

        if num_failed_fits == num_fits:
            all_fits_failed_message = (
                f"\nAll the {num_fits} fits failed.\n"
                "It is very likely that your model is misconfigured.\n"
                "You can try to debug the error by setting error_score='raise'.\n\n"
                f"Below are more details about the failures:\n{fit_errors_summary}"
            )
            raise ValueError(all_fits_failed_message)

        else:
            some_fits_failed_message = (
                f"\n{num_failed_fits} fits failed out of a total of {num_fits}.\n"
                "The score on these train-test partitions for these parameters"
                f" will be set to {error_score}.\n"
                "If these failures are not expected, you can try to debug them "
                "by setting error_score='raise'.\n\n"
                f"Below are more details about the failures:\n{fit_errors_summary}"
            )
            warnings.warn(some_fits_failed_message, FitFailedWarning)

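# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the upstream file: how fit failures
# surface. When every fold fails, the helper above raises; if only some folds
# had failed, it would instead emit a FitFailedWarning and their scores would
# be set to ``error_score``.
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score

X = np.random.RandomState(0).rand(30, 2)
y = np.array([0, 1] * 15)
bad = LogisticRegression(C=-1.0)  # invalid hyperparameter: every fit fails
try:
    cross_val_score(bad, X, y, cv=3, error_score=np.nan)
except ValueError as e:
    print("All the 3 fits failed" in str(e))  # True
# ---------------------------------------------------------------------------
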

@validate_params(
    {
        "estimator": [HasMethods("fit")],
        "X": ["array-like", "sparse matrix"],
        "y": ["array-like", None],
        "groups": ["array-like", None],
        "scoring": [StrOptions(set(get_scorer_names())), callable, None],
        "cv": ["cv_object"],
        "n_jobs": [Integral, None],
        "verbose": ["verbose"],
        "fit_params": [dict, None],
        "params": [dict, None],
        "pre_dispatch": [Integral, str, None],
        "error_score": [StrOptions({"raise"}), Real],
    },
    prefer_skip_nested_validation=False,  # estimator is not validated yet
)
def cross_val_score(
    estimator,
    X,
    y=None,
    *,
    groups=None,
    scoring=None,
    cv=None,
    n_jobs=None,
    verbose=0,
    fit_params=None,
    params=None,
    pre_dispatch="2*n_jobs",
    error_score=np.nan,
):
    """Evaluate a score by cross-validation.

    Read more in the :ref:`User Guide <cross_validation>`.

    Parameters
    ----------
    estimator : estimator object implementing 'fit'
        The object to use to fit the data.

    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        The data to fit. Can be, for example, a list or an array.

    y : array-like of shape (n_samples,) or (n_samples, n_outputs), \
            default=None
        The target variable to try to predict in the case of
        supervised learning.

    groups : array-like of shape (n_samples,), default=None
        Group labels for the samples used while splitting the dataset into
        train/test set. Only used in conjunction with a "Group" :term:`cv`
        instance (e.g., :class:`GroupKFold`).

        .. versionchanged:: 1.4
            ``groups`` can only be passed if metadata routing is not enabled
            via ``sklearn.set_config(enable_metadata_routing=True)``. When routing
            is enabled, pass ``groups`` alongside other metadata via the ``params``
            argument instead. E.g.:
            ``cross_val_score(..., params={'groups': groups})``.

    scoring : str or callable, default=None
        A str (see model evaluation documentation) or
        a scorer callable object / function with signature
        ``scorer(estimator, X, y)`` which should return only
        a single value.

        Similar to :func:`cross_validate`
        but only a single metric is permitted.

        If `None`, the estimator's default scorer (if available) is used.

    cv : int, cross-validation generator or an iterable, default=None
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:

        - `None`, to use the default 5-fold cross validation,
        - int, to specify the number of folds in a `(Stratified)KFold`,
        - :term:`CV splitter`,
        - An iterable that generates (train, test) splits as arrays of indices.

        For `int`/`None` inputs, if the estimator is a classifier and `y` is
        either binary or multiclass, :class:`StratifiedKFold` is used. In all
        other cases, :class:`KFold` is used. These splitters are instantiated
        with `shuffle=False` so the splits will be the same across calls.

        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.

        .. versionchanged:: 0.22
            `cv` default value if `None` changed from 3-fold to 5-fold.

    n_jobs : int, default=None
        Number of jobs to run in parallel. Training the estimator and computing
        the score are parallelized over the cross-validation splits.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.

    verbose : int, default=0
        The verbosity level.

    fit_params : dict, default=None
        Parameters to pass to the fit method of the estimator.

        .. deprecated:: 1.4
            This parameter is deprecated and will be removed in version 1.6. Use
            ``params`` instead.

    params : dict, default=None
        Parameters to pass to the underlying estimator's ``fit``, the scorer,
        and the CV splitter.

        .. versionadded:: 1.4

    pre_dispatch : int or str, default='2*n_jobs'
        Controls the number of jobs that get dispatched during parallel
        execution. Reducing this number can be useful to avoid an
        explosion of memory consumption when more jobs get dispatched
        than CPUs can process. This parameter can be:

            - ``None``, in which case all the jobs are immediately
              created and spawned. Use this for lightweight and
              fast-running jobs, to avoid delays due to on-demand
              spawning of the jobs

            - An int, giving the exact number of total jobs that are
              spawned

            - A str, giving an expression as a function of n_jobs,
              as in '2*n_jobs'

    error_score : 'raise' or numeric, default=np.nan
        Value to assign to the score if an error occurs in estimator fitting.
        If set to 'raise', the error is raised.
        If a numeric value is given, FitFailedWarning is raised.

        .. versionadded:: 0.20

    Returns
    -------
    scores : ndarray of float of shape=(len(list(cv)),)
        Array of scores of the estimator for each run of the cross validation.

    See Also
    --------
    cross_validate : To run cross-validation on multiple metrics and also to
        return train scores, fit times and score times.

    cross_val_predict : Get predictions from each split of cross-validation for
        diagnostic purposes.

    sklearn.metrics.make_scorer : Make a scorer from a performance metric or
        loss function.

    Examples
    --------
    >>> from sklearn import datasets, linear_model
    >>> from sklearn.model_selection import cross_val_score
    >>> diabetes = datasets.load_diabetes()
    >>> X = diabetes.data[:150]
    >>> y = diabetes.target[:150]
    >>> lasso = linear_model.Lasso()
    >>> print(cross_val_score(lasso, X, y, cv=3))
    [0.3315057  0.08022103 0.03531816]
    """
    # To ensure multimetric format is not supported
    scorer = check_scoring(estimator, scoring=scoring)

    cv_results = cross_validate(
        estimator=estimator,
        X=X,
        y=y,
        groups=groups,
        scoring={"score": scorer},
        cv=cv,
        n_jobs=n_jobs,
        verbose=verbose,
        fit_params=fit_params,
        params=params,
        pre_dispatch=pre_dispatch,
        error_score=error_score,
    )
    return cv_results["test_score"]

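# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the upstream file: as the wrapper body
# above shows, cross_val_score is cross_validate restricted to one metric, so
# these two calls produce the same scores.
import numpy as np
from sklearn import datasets, linear_model
from sklearn.model_selection import cross_val_score, cross_validate

X, y = datasets.load_diabetes(return_X_y=True)
lasso = linear_model.Lasso()
scores = cross_val_score(lasso, X, y, cv=3)
cv_results = cross_validate(lasso, X, y, cv=3)
assert np.allclose(scores, cv_results["test_score"])
# ---------------------------------------------------------------------------
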

def _fit_and_score(
    estimator,
    X,
    y,
    *,
    scorer,
    train,
    test,
    verbose,
    parameters,
    fit_params,
    score_params,
    return_train_score=False,
    return_parameters=False,
    return_n_test_samples=False,
    return_times=False,
    return_estimator=False,
    split_progress=None,
    candidate_progress=None,
    error_score=np.nan,
):
    """Fit estimator and compute scores for a given dataset split.

    Parameters
    ----------
    estimator : estimator object implementing 'fit'
        The object to use to fit the data.

    X : array-like of shape (n_samples, n_features)
        The data to fit.

    y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None
        The target variable to try to predict in the case of
        supervised learning.

    scorer : A single callable or dict mapping scorer name to the callable
        If it is a single callable, the return value for ``train_scores`` and
        ``test_scores`` is a single float.

        For a dict, it should be one mapping the scorer name to the scorer
        callable object / function.

        The callable object / fn should have signature
        ``scorer(estimator, X, y)``.

    train : array-like of shape (n_train_samples,)
        Indices of training samples.

    test : array-like of shape (n_test_samples,)
        Indices of test samples.

    verbose : int
        The verbosity level.

    error_score : 'raise' or numeric, default=np.nan
        Value to assign to the score if an error occurs in estimator fitting.
        If set to 'raise', the error is raised.
        If a numeric value is given, FitFailedWarning is raised.

    parameters : dict or None
        Parameters to be set on the estimator.

    fit_params : dict or None
        Parameters that will be passed to ``estimator.fit``.

    score_params : dict or None
        Parameters that will be passed to the scorer.

    return_train_score : bool, default=False
        Compute and return score on training set.

    return_parameters : bool, default=False
        Return parameters that have been used for the estimator.

    split_progress : {list, tuple} of int, default=None
        A list or tuple of format (<current_split_id>, <total_num_of_splits>).

    candidate_progress : {list, tuple} of int, default=None
        A list or tuple of format
        (<current_candidate_id>, <total_number_of_candidates>).

    return_n_test_samples : bool, default=False
        Whether to return the ``n_test_samples``.

    return_times : bool, default=False
        Whether to return the fit/score times.

    return_estimator : bool, default=False
        Whether to return the fitted estimator.

    Returns
    -------
    result : dict with the following attributes
        train_scores : dict of scorer name -> float
            Score on training set (for all the scorers),
            returned only if `return_train_score` is `True`.
        test_scores : dict of scorer name -> float
            Score on testing set (for all the scorers).
        n_test_samples : int
            Number of test samples.
        fit_time : float
            Time spent for fitting in seconds.
        score_time : float
            Time spent for scoring in seconds.
        parameters : dict or None
            The parameters that have been evaluated.
        estimator : estimator object
            The fitted estimator.
        fit_error : str or None
            Traceback str if the fit failed, None if the fit succeeded.
    """
    if not isinstance(error_score, numbers.Number) and error_score != "raise":
        raise ValueError(
            "error_score must be the string 'raise' or a numeric value. "
            "(Hint: if using 'raise', please make sure that it has been "
            "spelled correctly.)"
        )

    progress_msg = ""
    if verbose > 2:
        if split_progress is not None:
            progress_msg = f" {split_progress[0]+1}/{split_progress[1]}"
        if candidate_progress and verbose > 9:
            progress_msg += f"; {candidate_progress[0]+1}/{candidate_progress[1]}"

    if verbose > 1:
        if parameters is None:
            params_msg = ""
        else:
            sorted_keys = sorted(parameters)  # Ensure deterministic o/p
            params_msg = ", ".join(f"{k}={parameters[k]}" for k in sorted_keys)
    if verbose > 9:
        start_msg = f"[CV{progress_msg}] START {params_msg}"
        print(f"{start_msg}{(80 - len(start_msg)) * '.'}")

    # Adjust length of sample weights
    fit_params = fit_params if fit_params is not None else {}
    fit_params = _check_method_params(X, params=fit_params, indices=train)
    score_params = score_params if score_params is not None else {}
    score_params_train = _check_method_params(X, params=score_params, indices=train)
    score_params_test = _check_method_params(X, params=score_params, indices=test)

    if parameters is not None:
        # here we clone the parameters, since sometimes the parameters
        # themselves might be estimators, e.g. when we search over different
        # estimators in a pipeline.
        # ref: https://github.com/scikit-learn/scikit-learn/pull/26786
        estimator = estimator.set_params(**clone(parameters, safe=False))

    start_time = time.time()

    X_train, y_train = _safe_split(estimator, X, y, train)
    X_test, y_test = _safe_split(estimator, X, y, test, train)

    result = {}
    try:
        if y_train is None:
            estimator.fit(X_train, **fit_params)
        else:
            estimator.fit(X_train, y_train, **fit_params)

    except Exception:
        # Note fit time as time until error
        fit_time = time.time() - start_time
        score_time = 0.0
        if error_score == "raise":
            raise
        elif isinstance(error_score, numbers.Number):
            if isinstance(scorer, dict):
                test_scores = {name: error_score for name in scorer}
                if return_train_score:
                    train_scores = test_scores.copy()
            else:
                test_scores = error_score
                if return_train_score:
                    train_scores = error_score
        result["fit_error"] = format_exc()
    else:
        result["fit_error"] = None

        fit_time = time.time() - start_time
        test_scores = _score(
            estimator, X_test, y_test, scorer, score_params_test, error_score
        )
        score_time = time.time() - start_time - fit_time
        if return_train_score:
            train_scores = _score(
                estimator, X_train, y_train, scorer, score_params_train, error_score
            )

    if verbose > 1:
        total_time = score_time + fit_time
        end_msg = f"[CV{progress_msg}] END "
        result_msg = params_msg + (";" if params_msg else "")
        if verbose > 2:
            if isinstance(test_scores, dict):
                for scorer_name in sorted(test_scores):
                    result_msg += f" {scorer_name}: ("
                    if return_train_score:
                        scorer_scores = train_scores[scorer_name]
                        result_msg += f"train={scorer_scores:.3f}, "
                    result_msg += f"test={test_scores[scorer_name]:.3f})"
            else:
                result_msg += ", score="
                if return_train_score:
                    result_msg += f"(train={train_scores:.3f}, test={test_scores:.3f})"
                else:
                    result_msg += f"{test_scores:.3f}"
        result_msg += f" total time={logger.short_format_time(total_time)}"

        # Right align the result_msg
        end_msg += "." * (80 - len(end_msg) - len(result_msg))
        end_msg += result_msg
        print(end_msg)

    result["test_scores"] = test_scores
    if return_train_score:
        result["train_scores"] = train_scores
    if return_n_test_samples:
        result["n_test_samples"] = _num_samples(X_test)
    if return_times:
        result["fit_time"] = fit_time
        result["score_time"] = score_time
    if return_parameters:
        result["parameters"] = parameters
    if return_estimator:
        result["estimator"] = estimator
    return result

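# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the upstream file: driving the private
# _fit_and_score helper on a single split to make the result dict documented
# above concrete (private API; signature as defined in this file).
import numpy as np
from sklearn.linear_model import Ridge
from sklearn.metrics import check_scoring
from sklearn.model_selection._validation import _fit_and_score

X = np.random.RandomState(0).rand(20, 3)
y = np.arange(20.0)
result = _fit_and_score(
    Ridge(),
    X,
    y,
    scorer=check_scoring(Ridge()),
    train=np.arange(15),
    test=np.arange(15, 20),
    verbose=0,
    parameters=None,
    fit_params=None,
    score_params=None,
    return_times=True,
)
print(sorted(result))  # ['fit_error', 'fit_time', 'score_time', 'test_scores']
# ---------------------------------------------------------------------------
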

def _score(estimator, X_test, y_test, scorer, score_params, error_score="raise"):
    """Compute the score(s) of an estimator on a given test set.

    Will return a dict of floats if `scorer` is a dict, otherwise a single
    float is returned.
    """
    if isinstance(scorer, dict):
        # will cache method calls if needed. scorer() returns a dict
        scorer = _MultimetricScorer(scorers=scorer, raise_exc=(error_score == "raise"))

    score_params = {} if score_params is None else score_params

    try:
        if y_test is None:
            scores = scorer(estimator, X_test, **score_params)
        else:
            scores = scorer(estimator, X_test, y_test, **score_params)
    except Exception:
        if isinstance(scorer, _MultimetricScorer):
            # If `_MultimetricScorer` raises exception, the `error_score`
            # parameter is equal to "raise".
            raise
        else:
            if error_score == "raise":
                raise
            else:
                scores = error_score
                warnings.warn(
                    (
                        "Scoring failed. The score on this train-test partition for "
                        f"these parameters will be set to {error_score}. Details: \n"
                        f"{format_exc()}"
                    ),
                    UserWarning,
                )

    # Check non-raised error messages in `_MultimetricScorer`
    if isinstance(scorer, _MultimetricScorer):
        exception_messages = [
            (name, str_e) for name, str_e in scores.items() if isinstance(str_e, str)
        ]
        if exception_messages:
            # error_score != "raise"
            for name, str_e in exception_messages:
                scores[name] = error_score
                warnings.warn(
                    (
                        "Scoring failed. The score on this train-test partition for "
                        f"these parameters will be set to {error_score}. Details: \n"
                        f"{str_e}"
                    ),
                    UserWarning,
                )

    error_msg = "scoring must return a number, got %s (%s) instead. (scorer=%s)"
    if isinstance(scores, dict):
        for name, score in scores.items():
            if hasattr(score, "item"):
                with suppress(ValueError):
                    # e.g. unwrap memmapped scalars
                    score = score.item()
            if not isinstance(score, numbers.Number):
                raise ValueError(error_msg % (score, type(score), name))
            scores[name] = score
    else:  # scalar
        if hasattr(scores, "item"):
            with suppress(ValueError):
                # e.g. unwrap memmapped scalars
                scores = scores.item()
        if not isinstance(scores, numbers.Number):
            raise ValueError(error_msg % (scores, type(scores), scorer))
    return scores

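# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the upstream file: the type check at the
# end of _score rejects scorers that do not return a number (private API).
import numpy as np
from sklearn.linear_model import Ridge
from sklearn.model_selection._validation import _score

X = np.random.RandomState(0).rand(20, 3)
y = np.arange(20.0)
est = Ridge().fit(X, y)

def bad_scorer(estimator, X, y):
    return "not a number"

try:
    _score(est, X, y, bad_scorer, score_params=None)
except ValueError as e:
    print(e)  # scoring must return a number, got not a number (<class 'str'>) ...
# ---------------------------------------------------------------------------
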
1040
+ @validate_params(
1041
+ {
1042
+ "estimator": [HasMethods(["fit", "predict"])],
1043
+ "X": ["array-like", "sparse matrix"],
1044
+ "y": ["array-like", None],
1045
+ "groups": ["array-like", None],
1046
+ "cv": ["cv_object"],
1047
+ "n_jobs": [Integral, None],
1048
+ "verbose": ["verbose"],
1049
+ "fit_params": [dict, None],
1050
+ "params": [dict, None],
1051
+ "pre_dispatch": [Integral, str, None],
1052
+ "method": [
1053
+ StrOptions(
1054
+ {
1055
+ "predict",
1056
+ "predict_proba",
1057
+ "predict_log_proba",
1058
+ "decision_function",
1059
+ }
1060
+ )
1061
+ ],
1062
+ },
1063
+ prefer_skip_nested_validation=False, # estimator is not validated yet
1064
+ )
1065
+ def cross_val_predict(
1066
+ estimator,
1067
+ X,
1068
+ y=None,
1069
+ *,
1070
+ groups=None,
1071
+ cv=None,
1072
+ n_jobs=None,
1073
+ verbose=0,
1074
+ fit_params=None,
1075
+ params=None,
1076
+ pre_dispatch="2*n_jobs",
1077
+ method="predict",
1078
+ ):
1079
+ """Generate cross-validated estimates for each input data point.
1080
+
1081
+ The data is split according to the cv parameter. Each sample belongs
1082
+ to exactly one test set, and its prediction is computed with an
1083
+ estimator fitted on the corresponding training set.
1084
+
1085
+ Passing these predictions into an evaluation metric may not be a valid
1086
+ way to measure generalization performance. Results can differ from
1087
+ :func:`cross_validate` and :func:`cross_val_score` unless all tests sets
1088
+ have equal size and the metric decomposes over samples.
1089
+
1090
+ Read more in the :ref:`User Guide <cross_validation>`.
1091
+
1092
+ Parameters
1093
+ ----------
1094
+ estimator : estimator
1095
+ The estimator instance to use to fit the data. It must implement a `fit`
1096
+ method and the method given by the `method` parameter.
1097
+
1098
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
1099
+ The data to fit. Can be, for example a list, or an array at least 2d.
1100
+
1101
+ y : array-like of shape (n_samples,) or (n_samples, n_outputs), \
1102
+ default=None
1103
+ The target variable to try to predict in the case of
1104
+ supervised learning.
1105
+
1106
+ groups : array-like of shape (n_samples,), default=None
1107
+ Group labels for the samples used while splitting the dataset into
1108
+ train/test set. Only used in conjunction with a "Group" :term:`cv`
1109
+ instance (e.g., :class:`GroupKFold`).
1110
+
1111
+ .. versionchanged:: 1.4
1112
+ ``groups`` can only be passed if metadata routing is not enabled
1113
+ via ``sklearn.set_config(enable_metadata_routing=True)``. When routing
1114
+ is enabled, pass ``groups`` alongside other metadata via the ``params``
1115
+ argument instead. E.g.:
1116
+ ``cross_val_predict(..., params={'groups': groups})``.
1117
+
1118
+ cv : int, cross-validation generator or an iterable, default=None
1119
+ Determines the cross-validation splitting strategy.
1120
+ Possible inputs for cv are:
1121
+
1122
+ - None, to use the default 5-fold cross validation,
1123
+ - int, to specify the number of folds in a `(Stratified)KFold`,
1124
+ - :term:`CV splitter`,
1125
+ - An iterable that generates (train, test) splits as arrays of indices.
1126
+
1127
+ For int/None inputs, if the estimator is a classifier and ``y`` is
1128
+ either binary or multiclass, :class:`StratifiedKFold` is used. In all
1129
+ other cases, :class:`KFold` is used. These splitters are instantiated
1130
+ with `shuffle=False` so the splits will be the same across calls.
1131
+
1132
+ Refer :ref:`User Guide <cross_validation>` for the various
1133
+ cross-validation strategies that can be used here.
1134
+
1135
+ .. versionchanged:: 0.22
1136
+ ``cv`` default value if None changed from 3-fold to 5-fold.
1137
+
1138
+ n_jobs : int, default=None
1139
+ Number of jobs to run in parallel. Training the estimator and
1140
+ predicting are parallelized over the cross-validation splits.
1141
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
1142
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
1143
+ for more details.
1144
+
1145
+ verbose : int, default=0
1146
+ The verbosity level.
1147
+
1148
+ fit_params : dict, default=None
1149
+ Parameters to pass to the fit method of the estimator.
1150
+
1151
+ .. deprecated:: 1.4
1152
+ This parameter is deprecated and will be removed in version 1.6. Use
1153
+ ``params`` instead.
1154
+
1155
+ params : dict, default=None
1156
+ Parameters to pass to the underlying estimator's ``fit`` and the CV
1157
+ splitter.
1158
+
1159
+ .. versionadded:: 1.4
1160
+
1161
+ pre_dispatch : int or str, default='2*n_jobs'
1162
+ Controls the number of jobs that get dispatched during parallel
1163
+ execution. Reducing this number can be useful to avoid an
1164
+ explosion of memory consumption when more jobs get dispatched
1165
+ than CPUs can process. This parameter can be:
1166
+
1167
+ - None, in which case all the jobs are immediately
1168
+ created and spawned. Use this for lightweight and
1169
+ fast-running jobs, to avoid delays due to on-demand
1170
+ spawning of the jobs
1171
+
1172
+ - An int, giving the exact number of total jobs that are
1173
+ spawned
1174
+
1175
+ - A str, giving an expression as a function of n_jobs,
1176
+ as in '2*n_jobs'
1177
+
1178
+ method : {'predict', 'predict_proba', 'predict_log_proba', \
1179
+ 'decision_function'}, default='predict'
1180
+ The method to be invoked by `estimator`.
1181
+
1182
+ Returns
1183
+ -------
1184
+ predictions : ndarray
1185
+ This is the result of calling `method`. Shape:
1186
+
1187
+ - When `method` is 'predict' and in special case where `method` is
1188
+ 'decision_function' and the target is binary: (n_samples,)
1189
+ - When `method` is one of {'predict_proba', 'predict_log_proba',
1190
+ 'decision_function'} (unless special case above):
1191
+ (n_samples, n_classes)
1192
+ - If `estimator` is :term:`multioutput`, an extra dimension
1193
+ 'n_outputs' is added to the end of each shape above.
1194
+
1195
+ See Also
1196
+ --------
1197
+ cross_val_score : Calculate score for each CV split.
1198
+ cross_validate : Calculate one or more scores and timings for each CV
1199
+ split.
1200
+
1201
+ Notes
1202
+ -----
1203
+ In the case that one or more classes are absent in a training portion, a
1204
+ default score needs to be assigned to all instances for that class if
1205
+ ``method`` produces columns per class, as in {'decision_function',
1206
+ 'predict_proba', 'predict_log_proba'}. For ``predict_proba`` this value is
1207
+ 0. In order to ensure finite output, we approximate negative infinity by
1208
+ the minimum finite float value for the dtype in other cases.
1209
+
1210
+ Examples
1211
+ --------
1212
+ >>> from sklearn import datasets, linear_model
1213
+ >>> from sklearn.model_selection import cross_val_predict
1214
+ >>> diabetes = datasets.load_diabetes()
1215
+ >>> X = diabetes.data[:150]
1216
+ >>> y = diabetes.target[:150]
1217
+ >>> lasso = linear_model.Lasso()
1218
+ >>> y_pred = cross_val_predict(lasso, X, y, cv=3)
1219
+ """
1220
+     params = _check_params_groups_deprecation(fit_params, params, groups)
+     X, y = indexable(X, y)
+
+     if _routing_enabled():
+         # For estimators, a MetadataRouter is created in get_metadata_routing
+         # methods. For these router methods, we create the router to use
+         # `process_routing` on it.
+         router = (
+             MetadataRouter(owner="cross_val_predict")
+             .add(
+                 splitter=cv,
+                 method_mapping=MethodMapping().add(caller="fit", callee="split"),
+             )
+             .add(
+                 estimator=estimator,
+                 # TODO(SLEP6): also pass metadata for the predict method.
+                 method_mapping=MethodMapping().add(caller="fit", callee="fit"),
+             )
+         )
+         try:
+             routed_params = process_routing(router, "fit", **params)
+         except UnsetMetadataPassedError as e:
+             # The default exception would mention `fit` since in the above
+             # `process_routing` code, we pass `fit` as the caller. However,
+             # the user is not calling `fit` directly, so we change the message
+             # to make it more suitable for this case.
+             unrequested_params = sorted(e.unrequested_params)
+             raise UnsetMetadataPassedError(
+                 message=(
+                     f"{unrequested_params} are passed to `cross_val_predict` but are"
+                     " not explicitly set as requested or not requested for"
+                     f" cross_val_predict's estimator: {estimator.__class__.__name__}."
+                     " Call `.set_fit_request({{metadata}}=True)` on the estimator for"
+                     f" each metadata in {unrequested_params} that you want to use and"
+                     " `metadata=False` for not using it. See the Metadata Routing User"
+                     " guide <https://scikit-learn.org/stable/metadata_routing.html>"
+                     " for more information."
+                 ),
+                 unrequested_params=e.unrequested_params,
+                 routed_params=e.routed_params,
+             )
+     else:
+         routed_params = Bunch()
+         routed_params.splitter = Bunch(split={"groups": groups})
+         routed_params.estimator = Bunch(fit=params)
+
+     cv = check_cv(cv, y, classifier=is_classifier(estimator))
+     splits = list(cv.split(X, y, **routed_params.splitter.split))
+
+     test_indices = np.concatenate([test for _, test in splits])
+     if not _check_is_permutation(test_indices, _num_samples(X)):
+         raise ValueError("cross_val_predict only works for partitions")
+
+     # If classification methods produce multiple columns of output,
+     # we need to manually encode classes to ensure consistent column ordering.
+     encode = (
+         method in ["decision_function", "predict_proba", "predict_log_proba"]
+         and y is not None
+     )
+     if encode:
+         y = np.asarray(y)
+         if y.ndim == 1:
+             le = LabelEncoder()
+             y = le.fit_transform(y)
+         elif y.ndim == 2:
+             y_enc = np.zeros_like(y, dtype=int)
+             for i_label in range(y.shape[1]):
+                 y_enc[:, i_label] = LabelEncoder().fit_transform(y[:, i_label])
+             y = y_enc
+
+     # We clone the estimator to make sure that all the folds are
+     # independent, and that it is pickle-able.
+     parallel = Parallel(n_jobs=n_jobs, verbose=verbose, pre_dispatch=pre_dispatch)
+     predictions = parallel(
+         delayed(_fit_and_predict)(
+             clone(estimator),
+             X,
+             y,
+             train,
+             test,
+             routed_params.estimator.fit,
+             method,
+         )
+         for train, test in splits
+     )
+
+     inv_test_indices = np.empty(len(test_indices), dtype=int)
+     inv_test_indices[test_indices] = np.arange(len(test_indices))
+
+     if sp.issparse(predictions[0]):
+         predictions = sp.vstack(predictions, format=predictions[0].format)
+     elif encode and isinstance(predictions[0], list):
+         # `predictions` is a list of method outputs from each fold.
+         # If each of those is also a list, then treat this as a
+         # multioutput-multiclass task. We need to separately concatenate
+         # the method outputs for each label into an `n_labels` long list.
+         n_labels = y.shape[1]
+         concat_pred = []
+         for i_label in range(n_labels):
+             label_preds = np.concatenate([p[i_label] for p in predictions])
+             concat_pred.append(label_preds)
+         predictions = concat_pred
+     else:
+         predictions = np.concatenate(predictions)
+
+     if isinstance(predictions, list):
+         return [p[inv_test_indices] for p in predictions]
+     else:
+         return predictions[inv_test_indices]
+
+
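A minimal usage sketch of the shape rules documented above (illustrative toy data; any multiclass classifier would do):

```python
import numpy as np
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_predict

X, y = make_classification(
    n_samples=90, n_classes=3, n_informative=4, random_state=0
)
clf = LogisticRegression(max_iter=1000)

# 'predict' gives one label per sample: shape (n_samples,)
labels = cross_val_predict(clf, X, y, cv=3)
assert labels.shape == (90,)

# 'predict_proba' gives one column per class: shape (n_samples, n_classes)
proba = cross_val_predict(clf, X, y, cv=3, method="predict_proba")
assert proba.shape == (90, 3)
assert np.allclose(proba.sum(axis=1), 1.0)  # each row is a distribution
```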
+ def _fit_and_predict(estimator, X, y, train, test, fit_params, method):
+     """Fit estimator and predict values for a given dataset split.
+
+     Read more in the :ref:`User Guide <cross_validation>`.
+
+     Parameters
+     ----------
+     estimator : estimator object implementing 'fit' and 'predict'
+         The object to use to fit the data.
+
+     X : array-like of shape (n_samples, n_features)
+         The data to fit.
+
+         .. versionchanged:: 0.20
+             X is only required to be an object with finite length or shape now.
+
+     y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None
+         The target variable to try to predict in the case of
+         supervised learning.
+
+     train : array-like of shape (n_train_samples,)
+         Indices of training samples.
+
+     test : array-like of shape (n_test_samples,)
+         Indices of test samples.
+
+     fit_params : dict or None
+         Parameters that will be passed to ``estimator.fit``.
+
+     method : str
+         Name of the method to invoke on the fitted estimator.
+
+     Returns
+     -------
+     predictions : sequence
+         Result of calling 'estimator.method'.
+     """
+     # Adjust length of sample weights
+     fit_params = fit_params if fit_params is not None else {}
+     fit_params = _check_method_params(X, params=fit_params, indices=train)
+
+     X_train, y_train = _safe_split(estimator, X, y, train)
+     X_test, _ = _safe_split(estimator, X, y, test, train)
+
+     if y_train is None:
+         estimator.fit(X_train, **fit_params)
+     else:
+         estimator.fit(X_train, y_train, **fit_params)
+     func = getattr(estimator, method)
+     predictions = func(X_test)
+
+     encode = (
+         method in ["decision_function", "predict_proba", "predict_log_proba"]
+         and y is not None
+     )
+
+     if encode:
+         if isinstance(predictions, list):
+             predictions = [
+                 _enforce_prediction_order(
+                     estimator.classes_[i_label],
+                     predictions[i_label],
+                     n_classes=len(set(y[:, i_label])),
+                     method=method,
+                 )
+                 for i_label in range(len(predictions))
+             ]
+         else:
+             # A 2D y array should be a binary label indicator matrix
+             n_classes = len(set(y)) if y.ndim == 1 else y.shape[1]
+             predictions = _enforce_prediction_order(
+                 estimator.classes_, predictions, n_classes, method
+             )
+     return predictions
+
+
+ def _enforce_prediction_order(classes, predictions, n_classes, method):
+     """Ensure that prediction arrays have correct column order.
+
+     When doing cross-validation, if one or more classes are
+     not present in the subset of data used for training,
+     then the output prediction array might not have the same
+     columns as other folds. Use the list of class names
+     (assumed to be ints) to enforce the correct column order.
+
+     Note that `classes` is the list of classes in this fold
+     (a subset of the classes in the full training set)
+     and `n_classes` is the number of classes in the full training set.
+     """
+     if n_classes != len(classes):
+         recommendation = (
+             "To fix this, use a cross-validation "
+             "technique resulting in properly "
+             "stratified folds"
+         )
+         warnings.warn(
+             "Number of classes in training fold ({}) does "
+             "not match total number of classes ({}). "
+             "Results may not be appropriate for your use case. "
+             "{}".format(len(classes), n_classes, recommendation),
+             RuntimeWarning,
+         )
+         if method == "decision_function":
+             if predictions.ndim == 2 and predictions.shape[1] != len(classes):
+                 # This handles the case when the shape of predictions
+                 # does not match the number of classes used to train
+                 # it with. This case is found when sklearn.svm.SVC is
+                 # set to `decision_function_shape='ovo'`.
+                 raise ValueError(
+                     "Output shape {} of {} does not match "
+                     "number of classes ({}) in fold. "
+                     "Irregular decision_function outputs "
+                     "are not currently supported by "
+                     "cross_val_predict".format(predictions.shape, method, len(classes))
+                 )
+             if len(classes) <= 2:
+                 # In this special case, `predictions` contains a 1D array.
+                 raise ValueError(
+                     "Only {} class/es in training fold, but {} "
+                     "in overall dataset. This "
+                     "is not supported for decision_function "
+                     "with imbalanced folds. {}".format(
+                         len(classes), n_classes, recommendation
+                     )
+                 )
+
+         float_min = np.finfo(predictions.dtype).min
+         default_values = {
+             "decision_function": float_min,
+             "predict_log_proba": float_min,
+             "predict_proba": 0,
+         }
+         predictions_for_all_classes = np.full(
+             (_num_samples(predictions), n_classes),
+             default_values[method],
+             dtype=predictions.dtype,
+         )
+         predictions_for_all_classes[:, classes] = predictions
+         predictions = predictions_for_all_classes
+     return predictions
+
+
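The fix applied above is a column scatter: each fold's output is written into the columns of a full-width array, and classes absent from the fold keep the method-specific default. A minimal numpy sketch of that step (made-up values):

```python
import numpy as np

# The full dataset has 4 classes, but this fold only saw classes 0 and 2.
classes_in_fold = np.array([0, 2])
fold_proba = np.array([[0.9, 0.1], [0.3, 0.7]])  # shape (n_samples, 2)

n_classes = 4
full = np.zeros((fold_proba.shape[0], n_classes))  # predict_proba default is 0
full[:, classes_in_fold] = fold_proba  # scatter fold columns into place

print(full)
# [[0.9 0.  0.1 0. ]
#  [0.3 0.  0.7 0. ]]  -> columns 1 and 3 stay at the default
```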
+ def _check_is_permutation(indices, n_samples):
+     """Check whether indices is a reordering of the array np.arange(n_samples).
+
+     Parameters
+     ----------
+     indices : ndarray
+         int array to test
+     n_samples : int
+         number of expected elements
+
+     Returns
+     -------
+     is_partition : bool
+         True iff sorted(indices) is np.arange(n_samples)
+     """
+     if len(indices) != n_samples:
+         return False
+     hit = np.zeros(n_samples, dtype=bool)
+     hit[indices] = True
+     if not np.all(hit):
+         return False
+     return True
+
+
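This helper is what backs the "only works for partitions" error in `cross_val_predict`: every sample must appear in exactly one test fold. A short sketch contrasting a partitioning splitter with a non-partitioning one:

```python
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import KFold, ShuffleSplit, cross_val_predict

X, y = load_iris(return_X_y=True)
clf = LogisticRegression(max_iter=1000)

# KFold partitions the samples, so predictions cover each sample exactly once.
cross_val_predict(clf, X, y, cv=KFold(n_splits=5))

# ShuffleSplit test sets can overlap and skip samples, so this raises
# ValueError: cross_val_predict only works for partitions
try:
    cross_val_predict(clf, X, y, cv=ShuffleSplit(n_splits=5, random_state=0))
except ValueError as exc:
    print(exc)
```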
+ @validate_params(
+     {
+         "estimator": [HasMethods("fit")],
+         "X": ["array-like", "sparse matrix"],
+         "y": ["array-like", None],
+         "groups": ["array-like", None],
+         "cv": ["cv_object"],
+         "n_permutations": [Interval(Integral, 1, None, closed="left")],
+         "n_jobs": [Integral, None],
+         "random_state": ["random_state"],
+         "verbose": ["verbose"],
+         "scoring": [StrOptions(set(get_scorer_names())), callable, None],
+         "fit_params": [dict, None],
+     },
+     prefer_skip_nested_validation=False,  # estimator is not validated yet
+ )
+ def permutation_test_score(
+     estimator,
+     X,
+     y,
+     *,
+     groups=None,
+     cv=None,
+     n_permutations=100,
+     n_jobs=None,
+     random_state=0,
+     verbose=0,
+     scoring=None,
+     fit_params=None,
+ ):
+     """Evaluate the significance of a cross-validated score with permutations.
+
+     Permutes targets to generate 'randomized data' and compute the empirical
+     p-value against the null hypothesis that features and targets are
+     independent.
+
+     The p-value represents the fraction of randomized data sets where the
+     estimator performed as well or better than in the original data. A small
+     p-value suggests that there is a real dependency between features and
+     targets which has been used by the estimator to give good predictions.
+     A large p-value may be due to a lack of real dependency between features
+     and targets, or to the estimator being unable to use that dependency to
+     give good predictions.
+
+     Read more in the :ref:`User Guide <permutation_test_score>`.
+
+     Parameters
+     ----------
+     estimator : estimator object implementing 'fit'
+         The object to use to fit the data.
+
+     X : array-like of shape at least 2D
+         The data to fit.
+
+     y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None
+         The target variable to try to predict in the case of
+         supervised learning.
+
+     groups : array-like of shape (n_samples,), default=None
+         Labels to constrain permutation within groups, i.e. ``y`` values
+         are permuted among samples with the same group identifier.
+         When not specified, ``y`` values are permuted among all samples.
+
+         When a grouped cross-validator is used, the group labels are
+         also passed on to the ``split`` method of the cross-validator. The
+         cross-validator uses them for grouping the samples while splitting
+         the dataset into train/test set.
+
+     cv : int, cross-validation generator or an iterable, default=None
+         Determines the cross-validation splitting strategy.
+         Possible inputs for cv are:
+
+         - `None`, to use the default 5-fold cross validation,
+         - int, to specify the number of folds in a `(Stratified)KFold`,
+         - :term:`CV splitter`,
+         - An iterable yielding (train, test) splits as arrays of indices.
+
+         For `int`/`None` inputs, if the estimator is a classifier and `y` is
+         either binary or multiclass, :class:`StratifiedKFold` is used. In all
+         other cases, :class:`KFold` is used. These splitters are instantiated
+         with `shuffle=False` so the splits will be the same across calls.
+
+         Refer to the :ref:`User Guide <cross_validation>` for the various
+         cross-validation strategies that can be used here.
+
+         .. versionchanged:: 0.22
+             `cv` default value if `None` changed from 3-fold to 5-fold.
+
+     n_permutations : int, default=100
+         Number of times to permute ``y``.
+
+     n_jobs : int, default=None
+         Number of jobs to run in parallel. Training the estimator and computing
+         the cross-validated score are parallelized over the permutations.
+         ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
+         ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
+         for more details.
+
+     random_state : int, RandomState instance or None, default=0
+         Pass an int for reproducible output for permutation of
+         ``y`` values among samples. See :term:`Glossary <random_state>`.
+
+     verbose : int, default=0
+         The verbosity level.
+
+     scoring : str or callable, default=None
+         A single str (see :ref:`scoring_parameter`) or a callable
+         (see :ref:`scoring`) to evaluate the predictions on the test set.
+
+         If `None` the estimator's score method is used.
+
+     fit_params : dict, default=None
+         Parameters to pass to the fit method of the estimator.
+
+         .. versionadded:: 0.24
+
+     Returns
+     -------
+     score : float
+         The true score without permuting targets.
+
+     permutation_scores : array of shape (n_permutations,)
+         The scores obtained for each permutation.
+
+     pvalue : float
+         The p-value, which approximates the probability that the score would
+         be obtained by chance. This is calculated as:
+
+         `(C + 1) / (n_permutations + 1)`
+
+         Where C is the number of permutations whose score >= the true score.
+
+         The best possible p-value is 1/(n_permutations + 1), the worst is 1.0.
+
+     Notes
+     -----
+     This function implements Test 1 in:
+
+         Ojala and Garriga. `Permutation Tests for Studying Classifier
+         Performance
+         <http://www.jmlr.org/papers/volume11/ojala10a/ojala10a.pdf>`_. The
+         Journal of Machine Learning Research (2010) vol. 11
+
+     Examples
+     --------
+     >>> from sklearn.datasets import make_classification
+     >>> from sklearn.linear_model import LogisticRegression
+     >>> from sklearn.model_selection import permutation_test_score
+     >>> X, y = make_classification(random_state=0)
+     >>> estimator = LogisticRegression()
+     >>> score, permutation_scores, pvalue = permutation_test_score(
+     ...     estimator, X, y, random_state=0
+     ... )
+     >>> print(f"Original Score: {score:.3f}")
+     Original Score: 0.810
+     >>> print(
+     ...     f"Permutation Scores: {permutation_scores.mean():.3f} +/- "
+     ...     f"{permutation_scores.std():.3f}"
+     ... )
+     Permutation Scores: 0.505 +/- 0.057
+     >>> print(f"P-value: {pvalue:.3f}")
+     P-value: 0.010
+     """
+     X, y, groups = indexable(X, y, groups)
+
+     cv = check_cv(cv, y, classifier=is_classifier(estimator))
+     scorer = check_scoring(estimator, scoring=scoring)
+     random_state = check_random_state(random_state)
+
+     # We clone the estimator to make sure that all the folds are
+     # independent, and that it is pickle-able.
+     score = _permutation_test_score(
+         clone(estimator), X, y, groups, cv, scorer, fit_params=fit_params
+     )
+     permutation_scores = Parallel(n_jobs=n_jobs, verbose=verbose)(
+         delayed(_permutation_test_score)(
+             clone(estimator),
+             X,
+             _shuffle(y, groups, random_state),
+             groups,
+             cv,
+             scorer,
+             fit_params=fit_params,
+         )
+         for _ in range(n_permutations)
+     )
+     permutation_scores = np.array(permutation_scores)
+     pvalue = (np.sum(permutation_scores >= score) + 1.0) / (n_permutations + 1)
+     return score, permutation_scores, pvalue
+
+
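The p-value returned on the last line is the `(C + 1) / (n_permutations + 1)` estimator from the docstring. A tiny numeric sketch of that arithmetic (made-up scores):

```python
import numpy as np

true_score = 0.81
permutation_scores = np.array([0.52, 0.48, 0.55, 0.83, 0.50])

C = np.sum(permutation_scores >= true_score)  # 1 permutation ties or beats it
pvalue = (C + 1) / (len(permutation_scores) + 1)
print(pvalue)  # (1 + 1) / (5 + 1) = 0.333...
```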
+ def _permutation_test_score(estimator, X, y, groups, cv, scorer, fit_params):
+     """Auxiliary function for permutation_test_score."""
+     # Adjust length of sample weights
+     fit_params = fit_params if fit_params is not None else {}
+     avg_score = []
+     for train, test in cv.split(X, y, groups):
+         X_train, y_train = _safe_split(estimator, X, y, train)
+         X_test, y_test = _safe_split(estimator, X, y, test, train)
+         fit_params = _check_method_params(X, params=fit_params, indices=train)
+         estimator.fit(X_train, y_train, **fit_params)
+         avg_score.append(scorer(estimator, X_test, y_test))
+     return np.mean(avg_score)
+
+
+ def _shuffle(y, groups, random_state):
+     """Return a shuffled copy of y, optionally shuffling within groups."""
+     if groups is None:
+         indices = random_state.permutation(len(y))
+     else:
+         indices = np.arange(len(groups))
+         for group in np.unique(groups):
+             this_mask = groups == group
+             indices[this_mask] = random_state.permutation(indices[this_mask])
+     return _safe_indexing(y, indices)
+
+
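With `groups` given, indices are permuted only within each group, so labels never move across group boundaries. A minimal sketch of that behavior (made-up data):

```python
import numpy as np

rng = np.random.RandomState(0)
y = np.array([1, 2, 3, 10, 20, 30])
groups = np.array([0, 0, 0, 1, 1, 1])

indices = np.arange(len(groups))
for group in np.unique(groups):
    mask = groups == group
    indices[mask] = rng.permutation(indices[mask])

# First three entries stay within {1, 2, 3}, last three within {10, 20, 30}.
print(y[indices])
```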
+ @validate_params(
+     {
+         "estimator": [HasMethods(["fit"])],
+         "X": ["array-like", "sparse matrix"],
+         "y": ["array-like", None],
+         "groups": ["array-like", None],
+         "train_sizes": ["array-like"],
+         "cv": ["cv_object"],
+         "scoring": [StrOptions(set(get_scorer_names())), callable, None],
+         "exploit_incremental_learning": ["boolean"],
+         "n_jobs": [Integral, None],
+         "pre_dispatch": [Integral, str],
+         "verbose": ["verbose"],
+         "shuffle": ["boolean"],
+         "random_state": ["random_state"],
+         "error_score": [StrOptions({"raise"}), Real],
+         "return_times": ["boolean"],
+         "fit_params": [dict, None],
+     },
+     prefer_skip_nested_validation=False,  # estimator is not validated yet
+ )
+ def learning_curve(
+     estimator,
+     X,
+     y,
+     *,
+     groups=None,
+     train_sizes=np.linspace(0.1, 1.0, 5),
+     cv=None,
+     scoring=None,
+     exploit_incremental_learning=False,
+     n_jobs=None,
+     pre_dispatch="all",
+     verbose=0,
+     shuffle=False,
+     random_state=None,
+     error_score=np.nan,
+     return_times=False,
+     fit_params=None,
+ ):
+     """Learning curve.
+
+     Determines cross-validated training and test scores for different training
+     set sizes.
+
+     A cross-validation generator splits the whole dataset k times into
+     training and test data. Subsets of the training set with varying sizes
+     will be used to train the estimator and a score for each training subset
+     size and the test set will be computed. Afterwards, the scores will be
+     averaged over all k runs for each training subset size.
+
+     Read more in the :ref:`User Guide <learning_curve>`.
+
+     Parameters
+     ----------
+     estimator : object type that implements the "fit" method
+         An object of that type which is cloned for each validation. It must
+         also implement "predict" unless `scoring` is a callable that doesn't
+         rely on "predict" to compute a score.
+
+     X : {array-like, sparse matrix} of shape (n_samples, n_features)
+         Training vector, where `n_samples` is the number of samples and
+         `n_features` is the number of features.
+
+     y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None
+         Target relative to X for classification or regression;
+         None for unsupervised learning.
+
+     groups : array-like of shape (n_samples,), default=None
+         Group labels for the samples used while splitting the dataset into
+         train/test set. Only used in conjunction with a "Group" :term:`cv`
+         instance (e.g., :class:`GroupKFold`).
+
+     train_sizes : array-like of shape (n_ticks,), \
+             default=np.linspace(0.1, 1.0, 5)
+         Relative or absolute numbers of training examples that will be used to
+         generate the learning curve. If the dtype is float, it is regarded as a
+         fraction of the maximum size of the training set (that is determined
+         by the selected validation method), i.e. it has to be within (0, 1].
+         Otherwise it is interpreted as absolute sizes of the training sets.
+         Note that for classification the number of samples usually has to
+         be large enough to contain at least one sample from each class.
+
+     cv : int, cross-validation generator or an iterable, default=None
+         Determines the cross-validation splitting strategy.
+         Possible inputs for cv are:
+
+         - None, to use the default 5-fold cross validation,
+         - int, to specify the number of folds in a `(Stratified)KFold`,
+         - :term:`CV splitter`,
+         - An iterable yielding (train, test) splits as arrays of indices.
+
+         For int/None inputs, if the estimator is a classifier and ``y`` is
+         either binary or multiclass, :class:`StratifiedKFold` is used. In all
+         other cases, :class:`KFold` is used. These splitters are instantiated
+         with `shuffle=False` so the splits will be the same across calls.
+
+         Refer to the :ref:`User Guide <cross_validation>` for the various
+         cross-validation strategies that can be used here.
+
+         .. versionchanged:: 0.22
+             ``cv`` default value if None changed from 3-fold to 5-fold.
+
+     scoring : str or callable, default=None
+         A str (see model evaluation documentation) or
+         a scorer callable object / function with signature
+         ``scorer(estimator, X, y)``.
+
+     exploit_incremental_learning : bool, default=False
+         If the estimator supports incremental learning, this will be
+         used to speed up fitting for different training set sizes.
+
+     n_jobs : int, default=None
+         Number of jobs to run in parallel. Training the estimator and computing
+         the score are parallelized over the different training and test sets.
+         ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
+         ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
+         for more details.
+
+     pre_dispatch : int or str, default='all'
+         Number of predispatched jobs for parallel execution (default is
+         all). The option can reduce the allocated memory. The str can
+         be an expression like '2*n_jobs'.
+
+     verbose : int, default=0
+         Controls the verbosity: the higher, the more messages.
+
+     shuffle : bool, default=False
+         Whether to shuffle training data before taking prefixes of it
+         based on ``train_sizes``.
+
+     random_state : int, RandomState instance or None, default=None
+         Used when ``shuffle`` is True. Pass an int for reproducible
+         output across multiple function calls.
+         See :term:`Glossary <random_state>`.
+
+     error_score : 'raise' or numeric, default=np.nan
+         Value to assign to the score if an error occurs in estimator fitting.
+         If set to 'raise', the error is raised.
+         If a numeric value is given, FitFailedWarning is raised.
+
+         .. versionadded:: 0.20
+
+     return_times : bool, default=False
+         Whether to return the fit and score times.
+
+     fit_params : dict, default=None
+         Parameters to pass to the fit method of the estimator.
+
+         .. versionadded:: 0.24
+
+     Returns
+     -------
+     train_sizes_abs : array of shape (n_unique_ticks,)
+         Numbers of training examples that have been used to generate the
+         learning curve. Note that the number of ticks might be less
+         than n_ticks because duplicate entries will be removed.
+
+     train_scores : array of shape (n_ticks, n_cv_folds)
+         Scores on training sets.
+
+     test_scores : array of shape (n_ticks, n_cv_folds)
+         Scores on test set.
+
+     fit_times : array of shape (n_ticks, n_cv_folds)
+         Times spent for fitting in seconds. Only present if ``return_times``
+         is True.
+
+     score_times : array of shape (n_ticks, n_cv_folds)
+         Times spent for scoring in seconds. Only present if ``return_times``
+         is True.
+
+     Examples
+     --------
+     >>> from sklearn.datasets import make_classification
+     >>> from sklearn.tree import DecisionTreeClassifier
+     >>> from sklearn.model_selection import learning_curve
+     >>> X, y = make_classification(n_samples=100, n_features=10, random_state=42)
+     >>> tree = DecisionTreeClassifier(max_depth=4, random_state=42)
+     >>> train_size_abs, train_scores, test_scores = learning_curve(
+     ...     tree, X, y, train_sizes=[0.3, 0.6, 0.9]
+     ... )
+     >>> for train_size, cv_train_scores, cv_test_scores in zip(
+     ...     train_size_abs, train_scores, test_scores
+     ... ):
+     ...     print(f"{train_size} samples were used to train the model")
+     ...     print(f"The average train accuracy is {cv_train_scores.mean():.2f}")
+     ...     print(f"The average test accuracy is {cv_test_scores.mean():.2f}")
+     24 samples were used to train the model
+     The average train accuracy is 1.00
+     The average test accuracy is 0.85
+     48 samples were used to train the model
+     The average train accuracy is 1.00
+     The average test accuracy is 0.90
+     72 samples were used to train the model
+     The average train accuracy is 1.00
+     The average test accuracy is 0.93
+     """
+     if exploit_incremental_learning and not hasattr(estimator, "partial_fit"):
+         raise ValueError(
+             "An estimator must support the partial_fit interface "
+             "to exploit incremental learning"
+         )
+     X, y, groups = indexable(X, y, groups)
+
+     cv = check_cv(cv, y, classifier=is_classifier(estimator))
+     # Store it as list as we will be iterating over the list multiple times
+     cv_iter = list(cv.split(X, y, groups))
+
+     scorer = check_scoring(estimator, scoring=scoring)
+
+     n_max_training_samples = len(cv_iter[0][0])
+     # Because the lengths of folds can be significantly different, it is
+     # not guaranteed that we use all of the available training data when we
+     # use the first 'n_max_training_samples' samples.
+     train_sizes_abs = _translate_train_sizes(train_sizes, n_max_training_samples)
+     n_unique_ticks = train_sizes_abs.shape[0]
+     if verbose > 0:
+         print("[learning_curve] Training set sizes: " + str(train_sizes_abs))
+
+     parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch, verbose=verbose)
+
+     if shuffle:
+         rng = check_random_state(random_state)
+         cv_iter = ((rng.permutation(train), test) for train, test in cv_iter)
+
+     if exploit_incremental_learning:
+         classes = np.unique(y) if is_classifier(estimator) else None
+         out = parallel(
+             delayed(_incremental_fit_estimator)(
+                 clone(estimator),
+                 X,
+                 y,
+                 classes,
+                 train,
+                 test,
+                 train_sizes_abs,
+                 scorer,
+                 return_times,
+                 error_score=error_score,
+                 fit_params=fit_params,
+             )
+             for train, test in cv_iter
+         )
+         out = np.asarray(out).transpose((2, 1, 0))
+     else:
+         train_test_proportions = []
+         for train, test in cv_iter:
+             for n_train_samples in train_sizes_abs:
+                 train_test_proportions.append((train[:n_train_samples], test))
+
+         results = parallel(
+             delayed(_fit_and_score)(
+                 clone(estimator),
+                 X,
+                 y,
+                 scorer=scorer,
+                 train=train,
+                 test=test,
+                 verbose=verbose,
+                 parameters=None,
+                 fit_params=fit_params,
+                 # TODO(SLEP6): support score params here
+                 score_params=None,
+                 return_train_score=True,
+                 error_score=error_score,
+                 return_times=return_times,
+             )
+             for train, test in train_test_proportions
+         )
+         _warn_or_raise_about_fit_failures(results, error_score)
+         results = _aggregate_score_dicts(results)
+         train_scores = results["train_scores"].reshape(-1, n_unique_ticks).T
+         test_scores = results["test_scores"].reshape(-1, n_unique_ticks).T
+         out = [train_scores, test_scores]
+
+         if return_times:
+             fit_times = results["fit_time"].reshape(-1, n_unique_ticks).T
+             score_times = results["score_time"].reshape(-1, n_unique_ticks).T
+             out.extend([fit_times, score_times])
+
+     ret = train_sizes_abs, out[0], out[1]
+
+     if return_times:
+         ret = ret + (out[2], out[3])
+
+     return ret
+
+
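When the estimator implements `partial_fit`, `exploit_incremental_learning=True` takes the `_incremental_fit_estimator` path (defined further down in this file) instead of refitting from scratch at every size. A minimal sketch (exact scores depend on the data and solver):

```python
from sklearn.datasets import make_classification
from sklearn.linear_model import SGDClassifier
from sklearn.model_selection import learning_curve

X, y = make_classification(n_samples=500, random_state=0)

# SGDClassifier supports partial_fit, so each CV fold is grown
# incrementally through the requested train_sizes.
sizes, train_scores, test_scores = learning_curve(
    SGDClassifier(random_state=0),
    X,
    y,
    train_sizes=[0.25, 0.5, 0.75, 1.0],
    exploit_incremental_learning=True,
)
print(sizes)                     # absolute training-set sizes
print(test_scores.mean(axis=1))  # mean CV test score per size
```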
+ def _translate_train_sizes(train_sizes, n_max_training_samples):
+     """Determine absolute sizes of training subsets and validate 'train_sizes'.
+
+     Examples:
+         _translate_train_sizes([0.5, 1.0], 10) -> [5, 10]
+         _translate_train_sizes([5, 10], 10) -> [5, 10]
+
+     Parameters
+     ----------
+     train_sizes : array-like of shape (n_ticks,)
+         Numbers of training examples that will be used to generate the
+         learning curve. If the dtype is float, it is regarded as a
+         fraction of 'n_max_training_samples', i.e. it has to be within (0, 1].
+
+     n_max_training_samples : int
+         Maximum number of training samples (upper bound of 'train_sizes').
+
+     Returns
+     -------
+     train_sizes_abs : array of shape (n_unique_ticks,)
+         Numbers of training examples that will be used to generate the
+         learning curve. Note that the number of ticks might be less
+         than n_ticks because duplicate entries will be removed.
+     """
+     train_sizes_abs = np.asarray(train_sizes)
+     n_ticks = train_sizes_abs.shape[0]
+     n_min_required_samples = np.min(train_sizes_abs)
+     n_max_required_samples = np.max(train_sizes_abs)
+     if np.issubdtype(train_sizes_abs.dtype, np.floating):
+         if n_min_required_samples <= 0.0 or n_max_required_samples > 1.0:
+             raise ValueError(
+                 "train_sizes has been interpreted as fractions "
+                 "of the maximum number of training samples and "
+                 "must be within (0, 1], but is within [%f, %f]."
+                 % (n_min_required_samples, n_max_required_samples)
+             )
+         train_sizes_abs = (train_sizes_abs * n_max_training_samples).astype(
+             dtype=int, copy=False
+         )
+         train_sizes_abs = np.clip(train_sizes_abs, 1, n_max_training_samples)
+     else:
+         if (
+             n_min_required_samples <= 0
+             or n_max_required_samples > n_max_training_samples
+         ):
+             raise ValueError(
+                 "train_sizes has been interpreted as absolute "
+                 "numbers of training samples and must be within "
+                 "(0, %d], but is within [%d, %d]."
+                 % (
+                     n_max_training_samples,
+                     n_min_required_samples,
+                     n_max_required_samples,
+                 )
+             )
+
+     train_sizes_abs = np.unique(train_sizes_abs)
+     if n_ticks > train_sizes_abs.shape[0]:
+         warnings.warn(
+             "Removed duplicate entries from 'train_sizes'. Number "
+             "of ticks will be less than the size of "
+             "'train_sizes': %d instead of %d." % (train_sizes_abs.shape[0], n_ticks),
+             RuntimeWarning,
+         )
+
+     return train_sizes_abs
+
+
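A quick numpy sketch of the fractional branch above, mirroring the docstring examples (the clip keeps every size at least 1):

```python
import numpy as np

n_max_training_samples = 10
train_sizes = np.array([0.05, 0.5, 1.0])  # fractions of the max training size

abs_sizes = (train_sizes * n_max_training_samples).astype(int)
abs_sizes = np.clip(abs_sizes, 1, n_max_training_samples)
print(abs_sizes)  # [ 1  5 10] -- 0.05 * 10 = 0.5 truncates to 0, clipped up to 1
```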
+ def _incremental_fit_estimator(
+     estimator,
+     X,
+     y,
+     classes,
+     train,
+     test,
+     train_sizes,
+     scorer,
+     return_times,
+     error_score,
+     fit_params,
+ ):
+     """Train estimator on training subsets incrementally and compute scores."""
+     train_scores, test_scores, fit_times, score_times = [], [], [], []
+     partitions = zip(train_sizes, np.split(train, train_sizes)[:-1])
+     if fit_params is None:
+         fit_params = {}
+     if classes is None:
+         partial_fit_func = partial(estimator.partial_fit, **fit_params)
+     else:
+         partial_fit_func = partial(estimator.partial_fit, classes=classes, **fit_params)
+
+     for n_train_samples, partial_train in partitions:
+         train_subset = train[:n_train_samples]
+         X_train, y_train = _safe_split(estimator, X, y, train_subset)
+         X_partial_train, y_partial_train = _safe_split(estimator, X, y, partial_train)
+         X_test, y_test = _safe_split(estimator, X, y, test, train_subset)
+         start_fit = time.time()
+         if y_partial_train is None:
+             partial_fit_func(X_partial_train)
+         else:
+             partial_fit_func(X_partial_train, y_partial_train)
+         fit_time = time.time() - start_fit
+         fit_times.append(fit_time)
+
+         start_score = time.time()
+
+         # TODO(SLEP6): support score params in the following two calls
+         test_scores.append(
+             _score(
+                 estimator,
+                 X_test,
+                 y_test,
+                 scorer,
+                 score_params=None,
+                 error_score=error_score,
+             )
+         )
+         train_scores.append(
+             _score(
+                 estimator,
+                 X_train,
+                 y_train,
+                 scorer,
+                 score_params=None,
+                 error_score=error_score,
+             )
+         )
+         score_time = time.time() - start_score
+         score_times.append(score_time)
+
+     ret = (
+         (train_scores, test_scores, fit_times, score_times)
+         if return_times
+         else (train_scores, test_scores)
+     )
+
+     return np.array(ret).T
+
+
+ @validate_params(
+     {
+         "estimator": [HasMethods(["fit"])],
+         "X": ["array-like", "sparse matrix"],
+         "y": ["array-like", None],
+         "param_name": [str],
+         "param_range": ["array-like"],
+         "groups": ["array-like", None],
+         "cv": ["cv_object"],
+         "scoring": [StrOptions(set(get_scorer_names())), callable, None],
+         "n_jobs": [Integral, None],
+         "pre_dispatch": [Integral, str],
+         "verbose": ["verbose"],
+         "error_score": [StrOptions({"raise"}), Real],
+         "fit_params": [dict, None],
+     },
+     prefer_skip_nested_validation=False,  # estimator is not validated yet
+ )
+ def validation_curve(
+     estimator,
+     X,
+     y,
+     *,
+     param_name,
+     param_range,
+     groups=None,
+     cv=None,
+     scoring=None,
+     n_jobs=None,
+     pre_dispatch="all",
+     verbose=0,
+     error_score=np.nan,
+     fit_params=None,
+ ):
+     """Validation curve.
+
+     Determine training and test scores for varying parameter values.
+
+     Compute scores for an estimator with different values of a specified
+     parameter. This is similar to grid search with one parameter. However, this
+     will also compute training scores and is merely a utility for plotting the
+     results.
+
+     Read more in the :ref:`User Guide <validation_curve>`.
+
+     Parameters
+     ----------
+     estimator : object type that implements the "fit" method
+         An object of that type which is cloned for each validation. It must
+         also implement "predict" unless `scoring` is a callable that doesn't
+         rely on "predict" to compute a score.
+
+     X : {array-like, sparse matrix} of shape (n_samples, n_features)
+         Training vector, where `n_samples` is the number of samples and
+         `n_features` is the number of features.
+
+     y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None
+         Target relative to X for classification or regression;
+         None for unsupervised learning.
+
+     param_name : str
+         Name of the parameter that will be varied.
+
+     param_range : array-like of shape (n_values,)
+         The values of the parameter that will be evaluated.
+
+     groups : array-like of shape (n_samples,), default=None
+         Group labels for the samples used while splitting the dataset into
+         train/test set. Only used in conjunction with a "Group" :term:`cv`
+         instance (e.g., :class:`GroupKFold`).
+
+     cv : int, cross-validation generator or an iterable, default=None
+         Determines the cross-validation splitting strategy.
+         Possible inputs for cv are:
+
+         - None, to use the default 5-fold cross validation,
+         - int, to specify the number of folds in a `(Stratified)KFold`,
+         - :term:`CV splitter`,
+         - An iterable yielding (train, test) splits as arrays of indices.
+
+         For int/None inputs, if the estimator is a classifier and ``y`` is
+         either binary or multiclass, :class:`StratifiedKFold` is used. In all
+         other cases, :class:`KFold` is used. These splitters are instantiated
+         with `shuffle=False` so the splits will be the same across calls.
+
+         Refer to the :ref:`User Guide <cross_validation>` for the various
+         cross-validation strategies that can be used here.
+
+         .. versionchanged:: 0.22
+             ``cv`` default value if None changed from 3-fold to 5-fold.
+
+     scoring : str or callable, default=None
+         A str (see model evaluation documentation) or
+         a scorer callable object / function with signature
+         ``scorer(estimator, X, y)``.
+
+     n_jobs : int, default=None
+         Number of jobs to run in parallel. Training the estimator and computing
+         the score are parallelized over the combinations of each parameter
+         value and each cross-validation split.
+         ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
+         ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
+         for more details.
+
+     pre_dispatch : int or str, default='all'
+         Number of predispatched jobs for parallel execution (default is
+         all). The option can reduce the allocated memory. The str can
+         be an expression like '2*n_jobs'.
+
+     verbose : int, default=0
+         Controls the verbosity: the higher, the more messages.
+
+     error_score : 'raise' or numeric, default=np.nan
+         Value to assign to the score if an error occurs in estimator fitting.
+         If set to 'raise', the error is raised.
+         If a numeric value is given, FitFailedWarning is raised.
+
+         .. versionadded:: 0.20
+
+     fit_params : dict, default=None
+         Parameters to pass to the fit method of the estimator.
+
+         .. versionadded:: 0.24
+
+     Returns
+     -------
+     train_scores : array of shape (n_ticks, n_cv_folds)
+         Scores on training sets.
+
+     test_scores : array of shape (n_ticks, n_cv_folds)
+         Scores on test set.
+
+     Notes
+     -----
+     See :ref:`sphx_glr_auto_examples_model_selection_plot_validation_curve.py`
+
+     Examples
+     --------
+     >>> import numpy as np
+     >>> from sklearn.datasets import make_classification
+     >>> from sklearn.model_selection import validation_curve
+     >>> from sklearn.linear_model import LogisticRegression
+     >>> X, y = make_classification(n_samples=1_000, random_state=0)
+     >>> logistic_regression = LogisticRegression()
+     >>> param_name, param_range = "C", np.logspace(-8, 3, 10)
+     >>> train_scores, test_scores = validation_curve(
+     ...     logistic_regression, X, y, param_name=param_name, param_range=param_range
+     ... )
+     >>> print(f"The average train accuracy is {train_scores.mean():.2f}")
+     The average train accuracy is 0.81
+     >>> print(f"The average test accuracy is {test_scores.mean():.2f}")
+     The average test accuracy is 0.81
+     """
+     X, y, groups = indexable(X, y, groups)
+
+     cv = check_cv(cv, y, classifier=is_classifier(estimator))
+     scorer = check_scoring(estimator, scoring=scoring)
+
+     parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch, verbose=verbose)
+     results = parallel(
+         delayed(_fit_and_score)(
+             clone(estimator),
+             X,
+             y,
+             scorer=scorer,
+             train=train,
+             test=test,
+             verbose=verbose,
+             parameters={param_name: v},
+             fit_params=fit_params,
+             # TODO(SLEP6): support score params here
+             score_params=None,
+             return_train_score=True,
+             error_score=error_score,
+         )
+         # NOTE do not change order of iteration to allow one time cv splitters
+         for train, test in cv.split(X, y, groups)
+         for v in param_range
+     )
+     n_params = len(param_range)
+
+     results = _aggregate_score_dicts(results)
+     train_scores = results["train_scores"].reshape(-1, n_params).T
+     test_scores = results["test_scores"].reshape(-1, n_params).T
+
+     return train_scores, test_scores
+
+
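The returned `(n_ticks, n_cv_folds)` arrays are meant for plotting; this same scikit-learn version also ships `ValidationCurveDisplay` (exercised by the test file later in this diff), which wraps the computation and the plot in one step. A minimal plotting sketch:

```python
import numpy as np
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import ValidationCurveDisplay

X, y = make_classification(n_samples=1_000, random_state=0)

# Fits one model per (parameter value, CV split) pair and draws the
# train/test curves with their standard-deviation bands.
display = ValidationCurveDisplay.from_estimator(
    LogisticRegression(),
    X,
    y,
    param_name="C",
    param_range=np.logspace(-8, 3, 10),
)
display.figure_.savefig("validation_curve.png")
```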
+ def _aggregate_score_dicts(scores):
+     """Aggregate a list of dicts into a dict of np ndarrays.
+
+     The input is a list of dicts of the form
+     [{'prec': 0.1, 'acc': 1.0}, {'prec': 0.1, 'acc': 1.0}, ...].
+     Convert it to a dict of arrays {'prec': np.array([0.1, ...]), ...}.
+
+     Parameters
+     ----------
+     scores : list of dict
+         List of dicts of the scores for all scorers. This is a flat list,
+         assumed originally to be of row major order.
+
+     Example
+     -------
+     >>> scores = [{'a': 1, 'b': 10}, {'a': 2, 'b': 2}, {'a': 3, 'b': 3},
+     ...           {'a': 10, 'b': 10}]  # doctest: +SKIP
+     >>> _aggregate_score_dicts(scores)  # doctest: +SKIP
+     {'a': array([ 1,  2,  3, 10]),
+      'b': array([10,  2,  3, 10])}
+     """
+     return {
+         key: (
+             np.asarray([score[key] for score in scores])
+             if isinstance(scores[0][key], numbers.Number)
+             else [score[key] for score in scores]
+         )
+         for key in scores[0]
+     }
env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/tests/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/tests/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (194 Bytes).
env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/tests/__pycache__/common.cpython-310.pyc ADDED
Binary file (1.18 kB).
env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/tests/__pycache__/test_plot.cpython-310.pyc ADDED
Binary file (11.9 kB).
env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/tests/__pycache__/test_search.cpython-310.pyc ADDED
Binary file (68.6 kB).
env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/tests/__pycache__/test_split.cpython-310.pyc ADDED
Binary file (50.1 kB).
env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/tests/__pycache__/test_successive_halving.cpython-310.pyc ADDED
Binary file (19 kB).
env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/tests/__pycache__/test_validation.cpython-310.pyc ADDED
Binary file (62.5 kB).
env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/tests/common.py ADDED
@@ -0,0 +1,24 @@
+ """
+ Common utilities for testing model selection.
+ """
+
+ import numpy as np
+
+ from sklearn.model_selection import KFold
+
+
+ class OneTimeSplitter:
+     """A wrapper to make KFold a single-entry cv iterator."""
+
+     def __init__(self, n_splits=4, n_samples=99):
+         self.n_splits = n_splits
+         self.n_samples = n_samples
+         self.indices = iter(KFold(n_splits=n_splits).split(np.ones(n_samples)))
+
+     def split(self, X=None, y=None, groups=None):
+         """Split can be called only once."""
+         for index in self.indices:
+             yield index
+
+     def get_n_splits(self, X=None, y=None, groups=None):
+         return self.n_splits
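A quick sketch of why the tests want this wrapper: the KFold iterator is created once in `__init__`, so a second call to `split` yields nothing, letting tests assert that a routine consumes the CV splits exactly once (uses the class defined above):

```python
splitter = OneTimeSplitter(n_splits=4, n_samples=99)

print(len(list(splitter.split())))  # 4 -- the first pass consumes the iterator
print(len(list(splitter.split())))  # 0 -- later passes are empty
```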
env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/tests/test_plot.py ADDED
@@ -0,0 +1,595 @@
1
+ import numpy as np
2
+ import pytest
3
+
4
+ from sklearn.datasets import load_iris
5
+ from sklearn.model_selection import (
6
+ LearningCurveDisplay,
7
+ ValidationCurveDisplay,
8
+ learning_curve,
9
+ validation_curve,
10
+ )
11
+ from sklearn.tree import DecisionTreeClassifier
12
+ from sklearn.utils import shuffle
13
+ from sklearn.utils._testing import assert_allclose, assert_array_equal
14
+
15
+
16
+ @pytest.fixture
17
+ def data():
18
+ return shuffle(*load_iris(return_X_y=True), random_state=0)
19
+
20
+
21
+ @pytest.mark.parametrize(
22
+ "params, err_type, err_msg",
23
+ [
24
+ ({"std_display_style": "invalid"}, ValueError, "Unknown std_display_style:"),
25
+ ({"score_type": "invalid"}, ValueError, "Unknown score_type:"),
26
+ ],
27
+ )
28
+ @pytest.mark.parametrize(
29
+ "CurveDisplay, specific_params",
30
+ [
31
+ (ValidationCurveDisplay, {"param_name": "max_depth", "param_range": [1, 3, 5]}),
32
+ (LearningCurveDisplay, {"train_sizes": [0.3, 0.6, 0.9]}),
33
+ ],
34
+ )
35
+ def test_curve_display_parameters_validation(
36
+ pyplot, data, params, err_type, err_msg, CurveDisplay, specific_params
37
+ ):
38
+ """Check that we raise a proper error when passing invalid parameters."""
39
+ X, y = data
40
+ estimator = DecisionTreeClassifier(random_state=0)
41
+
42
+ with pytest.raises(err_type, match=err_msg):
43
+ CurveDisplay.from_estimator(estimator, X, y, **specific_params, **params)
44
+
45
+
46
+ def test_learning_curve_display_default_usage(pyplot, data):
47
+ """Check the default usage of the LearningCurveDisplay class."""
48
+ X, y = data
49
+ estimator = DecisionTreeClassifier(random_state=0)
50
+
51
+ train_sizes = [0.3, 0.6, 0.9]
52
+ display = LearningCurveDisplay.from_estimator(
53
+ estimator, X, y, train_sizes=train_sizes
54
+ )
55
+
56
+ import matplotlib as mpl
57
+
58
+ assert display.errorbar_ is None
59
+
60
+ assert isinstance(display.lines_, list)
61
+ for line in display.lines_:
62
+ assert isinstance(line, mpl.lines.Line2D)
63
+
64
+ assert isinstance(display.fill_between_, list)
65
+ for fill in display.fill_between_:
66
+ assert isinstance(fill, mpl.collections.PolyCollection)
67
+ assert fill.get_alpha() == 0.5
68
+
69
+ assert display.score_name == "Score"
70
+ assert display.ax_.get_xlabel() == "Number of samples in the training set"
71
+ assert display.ax_.get_ylabel() == "Score"
72
+
73
+ _, legend_labels = display.ax_.get_legend_handles_labels()
74
+ assert legend_labels == ["Train", "Test"]
75
+
76
+ train_sizes_abs, train_scores, test_scores = learning_curve(
77
+ estimator, X, y, train_sizes=train_sizes
78
+ )
79
+
80
+ assert_array_equal(display.train_sizes, train_sizes_abs)
81
+ assert_allclose(display.train_scores, train_scores)
82
+ assert_allclose(display.test_scores, test_scores)
83
+
84
+
85
+ def test_validation_curve_display_default_usage(pyplot, data):
86
+ """Check the default usage of the ValidationCurveDisplay class."""
87
+ X, y = data
88
+ estimator = DecisionTreeClassifier(random_state=0)
89
+
90
+ param_name, param_range = "max_depth", [1, 3, 5]
91
+ display = ValidationCurveDisplay.from_estimator(
92
+ estimator, X, y, param_name=param_name, param_range=param_range
93
+ )
94
+
95
+ import matplotlib as mpl
96
+
97
+ assert display.errorbar_ is None
98
+
99
+ assert isinstance(display.lines_, list)
100
+ for line in display.lines_:
101
+ assert isinstance(line, mpl.lines.Line2D)
102
+
103
+ assert isinstance(display.fill_between_, list)
104
+ for fill in display.fill_between_:
105
+ assert isinstance(fill, mpl.collections.PolyCollection)
106
+ assert fill.get_alpha() == 0.5
107
+
108
+ assert display.score_name == "Score"
109
+ assert display.ax_.get_xlabel() == f"{param_name}"
110
+ assert display.ax_.get_ylabel() == "Score"
111
+
112
+ _, legend_labels = display.ax_.get_legend_handles_labels()
113
+ assert legend_labels == ["Train", "Test"]
114
+
115
+ train_scores, test_scores = validation_curve(
116
+ estimator, X, y, param_name=param_name, param_range=param_range
117
+ )
118
+
119
+ assert_array_equal(display.param_range, param_range)
120
+ assert_allclose(display.train_scores, train_scores)
121
+ assert_allclose(display.test_scores, test_scores)
122
+
123
+
124
+ @pytest.mark.parametrize(
125
+ "CurveDisplay, specific_params",
126
+ [
127
+ (ValidationCurveDisplay, {"param_name": "max_depth", "param_range": [1, 3, 5]}),
128
+ (LearningCurveDisplay, {"train_sizes": [0.3, 0.6, 0.9]}),
129
+ ],
130
+ )
131
+ def test_curve_display_negate_score(pyplot, data, CurveDisplay, specific_params):
132
+ """Check the behaviour of the `negate_score` parameter calling `from_estimator` and
133
+ `plot`.
134
+ """
135
+ X, y = data
136
+ estimator = DecisionTreeClassifier(max_depth=1, random_state=0)
137
+
138
+ negate_score = False
139
+ display = CurveDisplay.from_estimator(
140
+ estimator, X, y, **specific_params, negate_score=negate_score
141
+ )
142
+
143
+ positive_scores = display.lines_[0].get_data()[1]
144
+ assert (positive_scores >= 0).all()
145
+ assert display.ax_.get_ylabel() == "Score"
146
+
147
+ negate_score = True
148
+ display = CurveDisplay.from_estimator(
149
+ estimator, X, y, **specific_params, negate_score=negate_score
150
+ )
151
+
152
+ negative_scores = display.lines_[0].get_data()[1]
153
+ assert (negative_scores <= 0).all()
154
+ assert_allclose(negative_scores, -positive_scores)
155
+ assert display.ax_.get_ylabel() == "Negative score"
156
+
157
+ negate_score = False
158
+ display = CurveDisplay.from_estimator(
159
+ estimator, X, y, **specific_params, negate_score=negate_score
160
+ )
161
+ assert display.ax_.get_ylabel() == "Score"
162
+ display.plot(negate_score=not negate_score)
163
+ assert display.ax_.get_ylabel() == "Score"
164
+ assert (display.lines_[0].get_data()[1] < 0).all()
165
+
166
+
167
+ @pytest.mark.parametrize(
168
+ "score_name, ylabel", [(None, "Score"), ("Accuracy", "Accuracy")]
169
+ )
170
+ @pytest.mark.parametrize(
171
+ "CurveDisplay, specific_params",
172
+ [
173
+ (ValidationCurveDisplay, {"param_name": "max_depth", "param_range": [1, 3, 5]}),
174
+ (LearningCurveDisplay, {"train_sizes": [0.3, 0.6, 0.9]}),
175
+ ],
176
+ )
177
+ def test_curve_display_score_name(
178
+ pyplot, data, score_name, ylabel, CurveDisplay, specific_params
179
+ ):
180
+ """Check that we can overwrite the default score name shown on the y-axis."""
181
+ X, y = data
182
+ estimator = DecisionTreeClassifier(random_state=0)
183
+
184
+ display = CurveDisplay.from_estimator(
185
+ estimator, X, y, **specific_params, score_name=score_name
186
+ )
187
+
188
+ assert display.ax_.get_ylabel() == ylabel
189
+ X, y = data
190
+ estimator = DecisionTreeClassifier(max_depth=1, random_state=0)
191
+
192
+ display = CurveDisplay.from_estimator(
193
+ estimator, X, y, **specific_params, score_name=score_name
194
+ )
195
+
196
+ assert display.score_name == ylabel
197
+
198
+
199
+ @pytest.mark.parametrize("std_display_style", (None, "errorbar"))
200
+ def test_learning_curve_display_score_type(pyplot, data, std_display_style):
201
+ """Check the behaviour of setting the `score_type` parameter."""
202
+ X, y = data
203
+ estimator = DecisionTreeClassifier(random_state=0)
204
+
205
+ train_sizes = [0.3, 0.6, 0.9]
206
+ train_sizes_abs, train_scores, test_scores = learning_curve(
207
+ estimator, X, y, train_sizes=train_sizes
208
+ )
209
+
210
+ score_type = "train"
211
+ display = LearningCurveDisplay.from_estimator(
212
+ estimator,
213
+ X,
214
+ y,
215
+ train_sizes=train_sizes,
216
+ score_type=score_type,
217
+ std_display_style=std_display_style,
218
+ )
219
+
220
+ _, legend_label = display.ax_.get_legend_handles_labels()
221
+ assert legend_label == ["Train"]
222
+
223
+ if std_display_style is None:
224
+ assert len(display.lines_) == 1
225
+ assert display.errorbar_ is None
226
+ x_data, y_data = display.lines_[0].get_data()
227
+ else:
228
+ assert display.lines_ is None
229
+ assert len(display.errorbar_) == 1
230
+ x_data, y_data = display.errorbar_[0].lines[0].get_data()
231
+
232
+ assert_array_equal(x_data, train_sizes_abs)
233
+ assert_allclose(y_data, train_scores.mean(axis=1))
234
+
235
+ score_type = "test"
236
+ display = LearningCurveDisplay.from_estimator(
237
+ estimator,
238
+ X,
239
+ y,
240
+ train_sizes=train_sizes,
241
+ score_type=score_type,
242
+ std_display_style=std_display_style,
243
+ )
244
+
245
+ _, legend_label = display.ax_.get_legend_handles_labels()
246
+ assert legend_label == ["Test"]
247
+
248
+ if std_display_style is None:
249
+ assert len(display.lines_) == 1
250
+ assert display.errorbar_ is None
251
+ x_data, y_data = display.lines_[0].get_data()
252
+ else:
253
+ assert display.lines_ is None
254
+ assert len(display.errorbar_) == 1
255
+ x_data, y_data = display.errorbar_[0].lines[0].get_data()
256
+
257
+ assert_array_equal(x_data, train_sizes_abs)
258
+ assert_allclose(y_data, test_scores.mean(axis=1))
259
+
260
+ score_type = "both"
261
+ display = LearningCurveDisplay.from_estimator(
262
+ estimator,
263
+ X,
264
+ y,
265
+ train_sizes=train_sizes,
266
+ score_type=score_type,
267
+ std_display_style=std_display_style,
268
+ )
269
+
270
+ _, legend_label = display.ax_.get_legend_handles_labels()
271
+ assert legend_label == ["Train", "Test"]
272
+
273
+ if std_display_style is None:
274
+ assert len(display.lines_) == 2
275
+ assert display.errorbar_ is None
276
+ x_data_train, y_data_train = display.lines_[0].get_data()
277
+ x_data_test, y_data_test = display.lines_[1].get_data()
278
+ else:
279
+ assert display.lines_ is None
280
+ assert len(display.errorbar_) == 2
281
+ x_data_train, y_data_train = display.errorbar_[0].lines[0].get_data()
282
+ x_data_test, y_data_test = display.errorbar_[1].lines[0].get_data()
283
+
284
+ assert_array_equal(x_data_train, train_sizes_abs)
285
+ assert_allclose(y_data_train, train_scores.mean(axis=1))
286
+ assert_array_equal(x_data_test, train_sizes_abs)
287
+ assert_allclose(y_data_test, test_scores.mean(axis=1))
288
+
289
+
290
+ @pytest.mark.parametrize("std_display_style", (None, "errorbar"))
+ def test_validation_curve_display_score_type(pyplot, data, std_display_style):
+     """Check the behaviour of setting the `score_type` parameter."""
+     X, y = data
+     estimator = DecisionTreeClassifier(random_state=0)
+
+     param_name, param_range = "max_depth", [1, 3, 5]
+     train_scores, test_scores = validation_curve(
+         estimator, X, y, param_name=param_name, param_range=param_range
+     )
+
+     score_type = "train"
+     display = ValidationCurveDisplay.from_estimator(
+         estimator,
+         X,
+         y,
+         param_name=param_name,
+         param_range=param_range,
+         score_type=score_type,
+         std_display_style=std_display_style,
+     )
+
+     _, legend_label = display.ax_.get_legend_handles_labels()
+     assert legend_label == ["Train"]
+
+     if std_display_style is None:
+         assert len(display.lines_) == 1
+         assert display.errorbar_ is None
+         x_data, y_data = display.lines_[0].get_data()
+     else:
+         assert display.lines_ is None
+         assert len(display.errorbar_) == 1
+         x_data, y_data = display.errorbar_[0].lines[0].get_data()
+
+     assert_array_equal(x_data, param_range)
+     assert_allclose(y_data, train_scores.mean(axis=1))
+
+     score_type = "test"
+     display = ValidationCurveDisplay.from_estimator(
+         estimator,
+         X,
+         y,
+         param_name=param_name,
+         param_range=param_range,
+         score_type=score_type,
+         std_display_style=std_display_style,
+     )
+
+     _, legend_label = display.ax_.get_legend_handles_labels()
+     assert legend_label == ["Test"]
+
+     if std_display_style is None:
+         assert len(display.lines_) == 1
+         assert display.errorbar_ is None
+         x_data, y_data = display.lines_[0].get_data()
+     else:
+         assert display.lines_ is None
+         assert len(display.errorbar_) == 1
+         x_data, y_data = display.errorbar_[0].lines[0].get_data()
+
+     assert_array_equal(x_data, param_range)
+     assert_allclose(y_data, test_scores.mean(axis=1))
+
+     score_type = "both"
+     display = ValidationCurveDisplay.from_estimator(
+         estimator,
+         X,
+         y,
+         param_name=param_name,
+         param_range=param_range,
+         score_type=score_type,
+         std_display_style=std_display_style,
+     )
+
+     _, legend_label = display.ax_.get_legend_handles_labels()
+     assert legend_label == ["Train", "Test"]
+
+     if std_display_style is None:
+         assert len(display.lines_) == 2
+         assert display.errorbar_ is None
+         x_data_train, y_data_train = display.lines_[0].get_data()
+         x_data_test, y_data_test = display.lines_[1].get_data()
+     else:
+         assert display.lines_ is None
+         assert len(display.errorbar_) == 2
+         x_data_train, y_data_train = display.errorbar_[0].lines[0].get_data()
+         x_data_test, y_data_test = display.errorbar_[1].lines[0].get_data()
+
+     assert_array_equal(x_data_train, param_range)
+     assert_allclose(y_data_train, train_scores.mean(axis=1))
+     assert_array_equal(x_data_test, param_range)
+     assert_allclose(y_data_test, test_scores.mean(axis=1))
+
+
+ @pytest.mark.parametrize(
+     "CurveDisplay, specific_params, expected_xscale",
+     [
+         (
+             ValidationCurveDisplay,
+             {"param_name": "max_depth", "param_range": np.arange(1, 5)},
+             "linear",
+         ),
+         (LearningCurveDisplay, {"train_sizes": np.linspace(0.1, 0.9, num=5)}, "linear"),
+         (
+             ValidationCurveDisplay,
+             {
+                 "param_name": "max_depth",
+                 "param_range": np.round(np.logspace(0, 2, num=5)).astype(np.int64),
+             },
+             "log",
+         ),
+         (LearningCurveDisplay, {"train_sizes": np.logspace(-1, 0, num=5)}, "log"),
+     ],
+ )
+ def test_curve_display_xscale_auto(
+     pyplot, data, CurveDisplay, specific_params, expected_xscale
+ ):
+     """Check the behaviour of the x-axis scaling depending on the data provided."""
+     X, y = data
+     estimator = DecisionTreeClassifier(random_state=0)
+
+     display = CurveDisplay.from_estimator(estimator, X, y, **specific_params)
+     assert display.ax_.get_xscale() == expected_xscale
+
+
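+ # As the four cases above exercise, the x-scale is inferred from the spacing
+ # of the x values: roughly evenly spaced values keep a linear axis, while
+ # values that are evenly spaced in log-space (e.g. np.logspace outputs)
+ # switch the axis to "log". The exact detection rule is an implementation
+ # detail of the displays.
+
+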
+ @pytest.mark.parametrize(
+     "CurveDisplay, specific_params",
+     [
+         (ValidationCurveDisplay, {"param_name": "max_depth", "param_range": [1, 3, 5]}),
+         (LearningCurveDisplay, {"train_sizes": [0.3, 0.6, 0.9]}),
+     ],
+ )
+ def test_curve_display_std_display_style(pyplot, data, CurveDisplay, specific_params):
+     """Check the behaviour of the parameter `std_display_style`."""
+     X, y = data
+     estimator = DecisionTreeClassifier(random_state=0)
+
+     import matplotlib as mpl
+
+     std_display_style = None
+     display = CurveDisplay.from_estimator(
+         estimator,
+         X,
+         y,
+         **specific_params,
+         std_display_style=std_display_style,
+     )
+
+     assert len(display.lines_) == 2
+     for line in display.lines_:
+         assert isinstance(line, mpl.lines.Line2D)
+     assert display.errorbar_ is None
+     assert display.fill_between_ is None
+     _, legend_label = display.ax_.get_legend_handles_labels()
+     assert len(legend_label) == 2
+
+     std_display_style = "fill_between"
+     display = CurveDisplay.from_estimator(
+         estimator,
+         X,
+         y,
+         **specific_params,
+         std_display_style=std_display_style,
+     )
+
+     assert len(display.lines_) == 2
+     for line in display.lines_:
+         assert isinstance(line, mpl.lines.Line2D)
+     assert display.errorbar_ is None
+     assert len(display.fill_between_) == 2
+     for fill_between in display.fill_between_:
+         assert isinstance(fill_between, mpl.collections.PolyCollection)
+     _, legend_label = display.ax_.get_legend_handles_labels()
+     assert len(legend_label) == 2
+
+     std_display_style = "errorbar"
+     display = CurveDisplay.from_estimator(
+         estimator,
+         X,
+         y,
+         **specific_params,
+         std_display_style=std_display_style,
+     )
+
+     assert display.lines_ is None
+     assert len(display.errorbar_) == 2
+     for errorbar in display.errorbar_:
+         assert isinstance(errorbar, mpl.container.ErrorbarContainer)
+     assert display.fill_between_ is None
+     _, legend_label = display.ax_.get_legend_handles_labels()
+     assert len(legend_label) == 2
+
+
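+ # Summary of the three `std_display_style` modes exercised above:
+ #   None           -> lines_ only (mean curves, no std shown)
+ #   "fill_between" -> lines_ plus fill_between_ (PolyCollection band around the mean)
+ #   "errorbar"     -> errorbar_ only (one ErrorbarContainer per curve)
+ # Unused artist attributes are set to None rather than left undefined.
+
+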
+ @pytest.mark.parametrize(
+     "CurveDisplay, specific_params",
+     [
+         (ValidationCurveDisplay, {"param_name": "max_depth", "param_range": [1, 3, 5]}),
+         (LearningCurveDisplay, {"train_sizes": [0.3, 0.6, 0.9]}),
+     ],
+ )
+ def test_curve_display_plot_kwargs(pyplot, data, CurveDisplay, specific_params):
+     """Check the behaviour of the different plotting keyword arguments: `line_kw`,
+     `fill_between_kw`, and `errorbar_kw`."""
+     X, y = data
+     estimator = DecisionTreeClassifier(random_state=0)
+
+     std_display_style = "fill_between"
+     line_kw = {"color": "red"}
+     fill_between_kw = {"color": "red", "alpha": 1.0}
+     display = CurveDisplay.from_estimator(
+         estimator,
+         X,
+         y,
+         **specific_params,
+         std_display_style=std_display_style,
+         line_kw=line_kw,
+         fill_between_kw=fill_between_kw,
+     )
+
+     assert display.lines_[0].get_color() == "red"
+     assert_allclose(
+         display.fill_between_[0].get_facecolor(),
+         [[1.0, 0.0, 0.0, 1.0]],  # trust me, it's red
+     )
+
+     std_display_style = "errorbar"
+     errorbar_kw = {"color": "red"}
+     display = CurveDisplay.from_estimator(
+         estimator,
+         X,
+         y,
+         **specific_params,
+         std_display_style=std_display_style,
+         errorbar_kw=errorbar_kw,
+     )
+
+     assert display.errorbar_[0].lines[0].get_color() == "red"
+
+
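+ # `line_kw`, `fill_between_kw` and `errorbar_kw` are passed through to the
+ # underlying matplotlib `plot`, `fill_between` and `errorbar` calls, so any
+ # valid matplotlib keyword (color, alpha, linestyle, ...) can be used. The
+ # exact RGBA facecolor check above works because `fill_between_kw` forces
+ # `alpha=1.0`.
+
+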
+ # TODO(1.5): to be removed
+ def test_learning_curve_display_deprecate_log_scale(data, pyplot):
+     """Check that we warn for the deprecated parameter `log_scale`."""
+     X, y = data
+     estimator = DecisionTreeClassifier(random_state=0)
+
+     with pytest.warns(FutureWarning, match="`log_scale` parameter is deprecated"):
+         display = LearningCurveDisplay.from_estimator(
+             estimator, X, y, train_sizes=[0.3, 0.6, 0.9], log_scale=True
+         )
+
+     assert display.ax_.get_xscale() == "log"
+     assert display.ax_.get_yscale() == "linear"
+
+     with pytest.warns(FutureWarning, match="`log_scale` parameter is deprecated"):
+         display = LearningCurveDisplay.from_estimator(
+             estimator, X, y, train_sizes=[0.3, 0.6, 0.9], log_scale=False
+         )
+
+     assert display.ax_.get_xscale() == "linear"
+     assert display.ax_.get_yscale() == "linear"
+
+
+ @pytest.mark.parametrize(
+     "param_range, xscale",
+     [([5, 10, 15], "linear"), ([-50, 5, 50, 500], "symlog"), ([5, 50, 500], "log")],
+ )
+ def test_validation_curve_xscale_from_param_range_provided_as_a_list(
+     pyplot, data, param_range, xscale
+ ):
+     """Check the induced xscale from the provided param_range values."""
+     X, y = data
+     estimator = DecisionTreeClassifier(random_state=0)
+
+     param_name = "max_depth"
+     display = ValidationCurveDisplay.from_estimator(
+         estimator,
+         X,
+         y,
+         param_name=param_name,
+         param_range=param_range,
+     )
+
+     assert display.ax_.get_xscale() == xscale
+
+
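+ # As the cases above show, plain lists get the same treatment as arrays:
+ # log-ish spacing of positive values selects "log", while a log-ish range
+ # containing non-positive values falls back to "symlog", the matplotlib
+ # scale that can represent values of either sign.
+
+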
+ @pytest.mark.parametrize(
+     "Display, params",
+     [
+         (LearningCurveDisplay, {}),
+         (ValidationCurveDisplay, {"param_name": "max_depth", "param_range": [1, 3, 5]}),
+     ],
+ )
+ def test_subclassing_displays(pyplot, data, Display, params):
+     """Check that named constructors return the correct type when subclassed.
+
+     Non-regression test for:
+     https://github.com/scikit-learn/scikit-learn/pull/27675
+     """
+     X, y = data
+     estimator = DecisionTreeClassifier(random_state=0)
+
+     class SubclassOfDisplay(Display):
+         pass
+
+     display = SubclassOfDisplay.from_estimator(estimator, X, y, **params)
+     assert isinstance(display, SubclassOfDisplay)
env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/tests/test_search.py ADDED
@@ -0,0 +1,2537 @@
+ """Test the search module"""
+
+ import pickle
+ import re
+ import sys
+ from collections.abc import Iterable, Sized
+ from functools import partial
+ from io import StringIO
+ from itertools import chain, product
+ from types import GeneratorType
+
+ import numpy as np
+ import pytest
+ from scipy.stats import bernoulli, expon, uniform
+
+ from sklearn.base import BaseEstimator, ClassifierMixin, is_classifier
+ from sklearn.cluster import KMeans
+ from sklearn.datasets import (
+     make_blobs,
+     make_classification,
+     make_multilabel_classification,
+ )
+ from sklearn.ensemble import HistGradientBoostingClassifier
+ from sklearn.exceptions import FitFailedWarning
+ from sklearn.experimental import enable_halving_search_cv  # noqa
+ from sklearn.impute import SimpleImputer
+ from sklearn.linear_model import (
+     LinearRegression,
+     Ridge,
+     SGDClassifier,
+ )
+ from sklearn.metrics import (
+     accuracy_score,
+     confusion_matrix,
+     f1_score,
+     make_scorer,
+     r2_score,
+     recall_score,
+     roc_auc_score,
+ )
+ from sklearn.metrics.pairwise import euclidean_distances
+ from sklearn.model_selection import (
+     GridSearchCV,
+     GroupKFold,
+     GroupShuffleSplit,
+     HalvingGridSearchCV,
+     KFold,
+     LeaveOneGroupOut,
+     LeavePGroupsOut,
+     ParameterGrid,
+     ParameterSampler,
+     RandomizedSearchCV,
+     StratifiedKFold,
+     StratifiedShuffleSplit,
+     train_test_split,
+ )
+ from sklearn.model_selection._search import BaseSearchCV
+ from sklearn.model_selection.tests.common import OneTimeSplitter
+ from sklearn.neighbors import KernelDensity, KNeighborsClassifier, LocalOutlierFactor
+ from sklearn.pipeline import Pipeline
+ from sklearn.svm import SVC, LinearSVC
+ from sklearn.tests.metadata_routing_common import (
+     ConsumingScorer,
+     _Registry,
+     check_recorded_metadata,
+ )
+ from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
+ from sklearn.utils._mocking import CheckingClassifier, MockDataFrame
+ from sklearn.utils._testing import (
+     MinimalClassifier,
+     MinimalRegressor,
+     MinimalTransformer,
+     assert_allclose,
+     assert_almost_equal,
+     assert_array_almost_equal,
+     assert_array_equal,
+     ignore_warnings,
+ )
+ from sklearn.utils.fixes import CSR_CONTAINERS
+ from sklearn.utils.validation import _num_samples
+
+
+ # Neither of the following two estimators inherits from BaseEstimator,
+ # to test hyperparameter search on user-defined classifiers.
+ class MockClassifier:
+     """Dummy classifier to test the parameter search algorithms"""
+
+     def __init__(self, foo_param=0):
+         self.foo_param = foo_param
+
+     def fit(self, X, Y):
+         assert len(X) == len(Y)
+         self.classes_ = np.unique(Y)
+         return self
+
+     def predict(self, T):
+         return T.shape[0]
+
+     def transform(self, X):
+         return X + self.foo_param
+
+     def inverse_transform(self, X):
+         return X - self.foo_param
+
+     predict_proba = predict
+     predict_log_proba = predict
+     decision_function = predict
+
+     def score(self, X=None, Y=None):
+         if self.foo_param > 1:
+             score = 1.0
+         else:
+             score = 0.0
+         return score
+
+     def get_params(self, deep=False):
+         return {"foo_param": self.foo_param}
+
+     def set_params(self, **params):
+         self.foo_param = params["foo_param"]
+         return self
+
+
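+ # The searches only rely on this duck-typed interface: `get_params` /
+ # `set_params` so that candidates can be cloned and configured, `fit`, and
+ # either a `score` method or an explicit `scoring=` argument at search time.
+ # Inheriting from BaseEstimator is convenient but, as MockClassifier shows,
+ # not required.
+
+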
+ class LinearSVCNoScore(LinearSVC):
+     """A LinearSVC classifier that has no score method."""
+
+     @property
+     def score(self):
+         raise AttributeError
+
+
+ X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
+ y = np.array([1, 1, 2, 2])
+
+
+ def assert_grid_iter_equals_getitem(grid):
+     assert list(grid) == [grid[i] for i in range(len(grid))]
+
+
+ @pytest.mark.parametrize("klass", [ParameterGrid, partial(ParameterSampler, n_iter=10)])
+ @pytest.mark.parametrize(
+     "input, error_type, error_message",
+     [
+         (0, TypeError, r"Parameter .* a dict or a list, got: 0 of type int"),
+         ([{"foo": [0]}, 0], TypeError, r"Parameter .* is not a dict \(0\)"),
+         (
+             {"foo": 0},
+             TypeError,
+             r"Parameter (grid|distribution) for parameter 'foo' (is not|needs to be) "
+             r"(a list or a numpy array|iterable or a distribution).*",
+         ),
+     ],
+ )
+ def test_validate_parameter_input(klass, input, error_type, error_message):
+     with pytest.raises(error_type, match=error_message):
+         klass(input)
+
+
+ def test_parameter_grid():
+     # Test basic properties of ParameterGrid.
+     params1 = {"foo": [1, 2, 3]}
+     grid1 = ParameterGrid(params1)
+     assert isinstance(grid1, Iterable)
+     assert isinstance(grid1, Sized)
+     assert len(grid1) == 3
+     assert_grid_iter_equals_getitem(grid1)
+
+     params2 = {"foo": [4, 2], "bar": ["ham", "spam", "eggs"]}
+     grid2 = ParameterGrid(params2)
+     assert len(grid2) == 6
+
+     # loop to assert we can iterate over the grid multiple times
+     for i in range(2):
+         # tuple + chain transforms {"a": 1, "b": 2} to ("a", 1, "b", 2)
+         points = set(tuple(chain(*(sorted(p.items())))) for p in grid2)
+         assert points == set(
+             ("bar", x, "foo", y) for x, y in product(params2["bar"], params2["foo"])
+         )
+         assert_grid_iter_equals_getitem(grid2)
+
+     # Special case: empty grid (useful to get default estimator settings)
+     empty = ParameterGrid({})
+     assert len(empty) == 1
+     assert list(empty) == [{}]
+     assert_grid_iter_equals_getitem(empty)
+     with pytest.raises(IndexError):
+         empty[1]
+
+     has_empty = ParameterGrid([{"C": [1, 10]}, {}, {"C": [0.5]}])
+     assert len(has_empty) == 4
+     assert list(has_empty) == [{"C": 1}, {"C": 10}, {}, {"C": 0.5}]
+     assert_grid_iter_equals_getitem(has_empty)
+
+
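+ # As exercised above, ParameterGrid accepts either a single dict (the cross
+ # product of its value lists) or a list of dicts (the union of sub-grids),
+ # and supports len(), iteration and integer indexing. For example:
+ #
+ #     list(ParameterGrid({"kernel": ["linear", "rbf"]}))
+ #     # -> [{"kernel": "linear"}, {"kernel": "rbf"}]
+
+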
+ def test_grid_search():
+     # Test that the best estimator contains the right value for foo_param
+     clf = MockClassifier()
+     grid_search = GridSearchCV(clf, {"foo_param": [1, 2, 3]}, cv=3, verbose=3)
+     # make sure it selects the smallest parameter in case of ties
+     old_stdout = sys.stdout
+     sys.stdout = StringIO()
+     grid_search.fit(X, y)
+     sys.stdout = old_stdout
+     assert grid_search.best_estimator_.foo_param == 2
+
+     assert_array_equal(grid_search.cv_results_["param_foo_param"].data, [1, 2, 3])
+
+     # Smoke test the score etc:
+     grid_search.score(X, y)
+     grid_search.predict_proba(X)
+     grid_search.decision_function(X)
+     grid_search.transform(X)
+
+     # Test exception handling on scoring
+     grid_search.scoring = "sklearn"
+     with pytest.raises(ValueError):
+         grid_search.fit(X, y)
+
+
+ def test_grid_search_pipeline_steps():
+     # check that parameters that are estimators are cloned before fitting
+     pipe = Pipeline([("regressor", LinearRegression())])
+     param_grid = {"regressor": [LinearRegression(), Ridge()]}
+     grid_search = GridSearchCV(pipe, param_grid, cv=2)
+     grid_search.fit(X, y)
+     regressor_results = grid_search.cv_results_["param_regressor"]
+     assert isinstance(regressor_results[0], LinearRegression)
+     assert isinstance(regressor_results[1], Ridge)
+     assert not hasattr(regressor_results[0], "coef_")
+     assert not hasattr(regressor_results[1], "coef_")
+     assert regressor_results[0] is not grid_search.best_estimator_
+     assert regressor_results[1] is not grid_search.best_estimator_
+     # check that we didn't modify the parameter grid that was passed
+     assert not hasattr(param_grid["regressor"][0], "coef_")
+     assert not hasattr(param_grid["regressor"][1], "coef_")
+
+
+ @pytest.mark.parametrize("SearchCV", [GridSearchCV, RandomizedSearchCV])
239
+ def test_SearchCV_with_fit_params(SearchCV):
240
+ X = np.arange(100).reshape(10, 10)
241
+ y = np.array([0] * 5 + [1] * 5)
242
+ clf = CheckingClassifier(expected_fit_params=["spam", "eggs"])
243
+ searcher = SearchCV(clf, {"foo_param": [1, 2, 3]}, cv=2, error_score="raise")
244
+
245
+ # The CheckingClassifier generates an assertion error if
246
+ # a parameter is missing or has length != len(X).
247
+ err_msg = r"Expected fit parameter\(s\) \['eggs'\] not seen."
248
+ with pytest.raises(AssertionError, match=err_msg):
249
+ searcher.fit(X, y, spam=np.ones(10))
250
+
251
+ err_msg = "Fit parameter spam has length 1; expected"
252
+ with pytest.raises(AssertionError, match=err_msg):
253
+ searcher.fit(X, y, spam=np.ones(1), eggs=np.zeros(10))
254
+ searcher.fit(X, y, spam=np.ones(10), eggs=np.zeros(10))
255
+
256
+
257
+ @ignore_warnings
258
+ def test_grid_search_no_score():
259
+ # Test grid-search on classifier that has no score function.
260
+ clf = LinearSVC(dual="auto", random_state=0)
261
+ X, y = make_blobs(random_state=0, centers=2)
262
+ Cs = [0.1, 1, 10]
263
+ clf_no_score = LinearSVCNoScore(dual="auto", random_state=0)
264
+ grid_search = GridSearchCV(clf, {"C": Cs}, scoring="accuracy")
265
+ grid_search.fit(X, y)
266
+
267
+ grid_search_no_score = GridSearchCV(clf_no_score, {"C": Cs}, scoring="accuracy")
268
+ # smoketest grid search
269
+ grid_search_no_score.fit(X, y)
270
+
271
+ # check that best params are equal
272
+ assert grid_search_no_score.best_params_ == grid_search.best_params_
273
+ # check that we can call score and that it gives the correct result
274
+ assert grid_search.score(X, y) == grid_search_no_score.score(X, y)
275
+
276
+ # giving no scoring function raises an error
277
+ grid_search_no_score = GridSearchCV(clf_no_score, {"C": Cs})
278
+ with pytest.raises(TypeError, match="no scoring"):
279
+ grid_search_no_score.fit([[1]])
280
+
281
+
282
+ def test_grid_search_score_method():
+     X, y = make_classification(n_samples=100, n_classes=2, flip_y=0.2, random_state=0)
+     clf = LinearSVC(dual="auto", random_state=0)
+     grid = {"C": [0.1]}
+
+     search_no_scoring = GridSearchCV(clf, grid, scoring=None).fit(X, y)
+     search_accuracy = GridSearchCV(clf, grid, scoring="accuracy").fit(X, y)
+     search_no_score_method_auc = GridSearchCV(
+         LinearSVCNoScore(dual="auto"), grid, scoring="roc_auc"
+     ).fit(X, y)
+     search_auc = GridSearchCV(clf, grid, scoring="roc_auc").fit(X, y)
+
+     # Check warning only occurs in situation where behavior changed:
+     # estimator requires score method to compete with scoring parameter
+     score_no_scoring = search_no_scoring.score(X, y)
+     score_accuracy = search_accuracy.score(X, y)
+     score_no_score_auc = search_no_score_method_auc.score(X, y)
+     score_auc = search_auc.score(X, y)
+
+     # ensure the test is sane
+     assert score_auc < 1.0
+     assert score_accuracy < 1.0
+     assert score_auc != score_accuracy
+
+     assert_almost_equal(score_accuracy, score_no_scoring)
+     assert_almost_equal(score_auc, score_no_score_auc)
+
+
+ def test_grid_search_groups():
+     # Check if ValueError (when groups is None) propagates to GridSearchCV
+     # And also check if groups is correctly passed to the cv object
+     rng = np.random.RandomState(0)
+
+     X, y = make_classification(n_samples=15, n_classes=2, random_state=0)
+     groups = rng.randint(0, 3, 15)
+
+     clf = LinearSVC(dual="auto", random_state=0)
+     grid = {"C": [1]}
+
+     group_cvs = [
+         LeaveOneGroupOut(),
+         LeavePGroupsOut(2),
+         GroupKFold(n_splits=3),
+         GroupShuffleSplit(),
+     ]
+     error_msg = "The 'groups' parameter should not be None."
+     for cv in group_cvs:
+         gs = GridSearchCV(clf, grid, cv=cv)
+         with pytest.raises(ValueError, match=error_msg):
+             gs.fit(X, y)
+         gs.fit(X, y, groups=groups)
+
+     non_group_cvs = [StratifiedKFold(), StratifiedShuffleSplit()]
+     for cv in non_group_cvs:
+         gs = GridSearchCV(clf, grid, cv=cv)
+         # Should not raise an error
+         gs.fit(X, y)
+
+
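+ # Group-aware splitters (LeaveOneGroupOut, LeavePGroupsOut, GroupKFold,
+ # GroupShuffleSplit) need a `groups` array at fit time; the search object
+ # forwards `fit(..., groups=...)` to the splitter and surfaces the
+ # splitter's ValueError when it is missing. Splitters that do not use
+ # groups fit fine without it.
+
+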
+ def test_classes__property():
+     # Test that classes_ property matches best_estimator_.classes_
+     X = np.arange(100).reshape(10, 10)
+     y = np.array([0] * 5 + [1] * 5)
+     Cs = [0.1, 1, 10]
+
+     grid_search = GridSearchCV(LinearSVC(dual="auto", random_state=0), {"C": Cs})
+     grid_search.fit(X, y)
+     assert_array_equal(grid_search.best_estimator_.classes_, grid_search.classes_)
+
+     # Test that regressors do not have a classes_ attribute
+     grid_search = GridSearchCV(Ridge(), {"alpha": [1.0, 2.0]})
+     grid_search.fit(X, y)
+     assert not hasattr(grid_search, "classes_")
+
+     # Test that the grid searcher has no classes_ attribute before it's fit
+     grid_search = GridSearchCV(LinearSVC(dual="auto", random_state=0), {"C": Cs})
+     assert not hasattr(grid_search, "classes_")
+
+     # Test that the grid searcher has no classes_ attribute without a refit
+     grid_search = GridSearchCV(
+         LinearSVC(dual="auto", random_state=0), {"C": Cs}, refit=False
+     )
+     grid_search.fit(X, y)
+     assert not hasattr(grid_search, "classes_")
+
+
+ def test_trivial_cv_results_attr():
+     # Test search over a "grid" with only one point.
+     clf = MockClassifier()
+     grid_search = GridSearchCV(clf, {"foo_param": [1]}, cv=3)
+     grid_search.fit(X, y)
+     assert hasattr(grid_search, "cv_results_")
+
+     random_search = RandomizedSearchCV(clf, {"foo_param": [0]}, n_iter=1, cv=3)
+     random_search.fit(X, y)
+     # check the randomized search, not the grid search fitted above
+     assert hasattr(random_search, "cv_results_")
+
+
+ def test_no_refit():
+     # Test that GSCV can be used for model selection alone without refitting
+     clf = MockClassifier()
+     for scoring in [None, ["accuracy", "precision"]]:
+         grid_search = GridSearchCV(clf, {"foo_param": [1, 2, 3]}, refit=False, cv=3)
+         grid_search.fit(X, y)
+         assert (
+             not hasattr(grid_search, "best_estimator_")
+             and hasattr(grid_search, "best_index_")
+             and hasattr(grid_search, "best_params_")
+         )
+
+         # Make sure the functions predict/transform etc. raise meaningful
+         # error messages
+         for fn_name in (
+             "predict",
+             "predict_proba",
+             "predict_log_proba",
+             "transform",
+             "inverse_transform",
+         ):
+             outer_msg = f"has no attribute '{fn_name}'"
+             inner_msg = (
+                 f"`refit=False`. {fn_name} is available only after "
+                 "refitting on the best parameters"
+             )
+             with pytest.raises(AttributeError, match=outer_msg) as exec_info:
+                 getattr(grid_search, fn_name)(X)
+
+             assert isinstance(exec_info.value.__cause__, AttributeError)
+             assert inner_msg in str(exec_info.value.__cause__)
+
+     # Test that an invalid refit param raises appropriate error messages
+     error_msg = (
+         "For multi-metric scoring, the parameter refit must be set to a scorer key"
+     )
+     for refit in [True, "recall", "accuracy"]:
+         with pytest.raises(ValueError, match=error_msg):
+             GridSearchCV(
+                 clf, {}, refit=refit, scoring={"acc": "accuracy", "prec": "precision"}
+             ).fit(X, y)
+
+
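+ # With `refit=False` the search acts purely as a model-selection tool:
+ # `cv_results_`, `best_index_` and `best_params_` are available, but there
+ # is no `best_estimator_`, so prediction-like methods raise AttributeError.
+ # With multi-metric scoring, `refit` must explicitly name one of the scorer
+ # keys (or be a callable).
+
+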
+ def test_grid_search_error():
+     # Test that grid search will capture errors on data with different length
+     X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
+
+     clf = LinearSVC(dual="auto")
+     cv = GridSearchCV(clf, {"C": [0.1, 1.0]})
+     with pytest.raises(ValueError):
+         cv.fit(X_[:180], y_)
+
+
+ def test_grid_search_one_grid_point():
+     X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
+     param_dict = {"C": [1.0], "kernel": ["rbf"], "gamma": [0.1]}
+
+     clf = SVC(gamma="auto")
+     cv = GridSearchCV(clf, param_dict)
+     cv.fit(X_, y_)
+
+     clf = SVC(C=1.0, kernel="rbf", gamma=0.1)
+     clf.fit(X_, y_)
+
+     assert_array_equal(clf.dual_coef_, cv.best_estimator_.dual_coef_)
+
+
+ def test_grid_search_when_param_grid_includes_range():
+     # Test that the best estimator contains the right value for foo_param
+     clf = MockClassifier()
+     grid_search = GridSearchCV(clf, {"foo_param": range(1, 4)}, cv=3)
+     grid_search.fit(X, y)
+     assert grid_search.best_estimator_.foo_param == 2
+
+
+ def test_grid_search_bad_param_grid():
+     X, y = make_classification(n_samples=10, n_features=5, random_state=0)
+     param_dict = {"C": 1}
+     clf = SVC(gamma="auto")
+     error_msg = re.escape(
+         "Parameter grid for parameter 'C' needs to be a list or "
+         "a numpy array, but got 1 (of type int) instead. Single "
+         "values need to be wrapped in a list with one element."
+     )
+     search = GridSearchCV(clf, param_dict)
+     with pytest.raises(TypeError, match=error_msg):
+         search.fit(X, y)
+
+     param_dict = {"C": []}
+     clf = SVC()
+     error_msg = re.escape(
+         "Parameter grid for parameter 'C' need to be a non-empty sequence, got: []"
+     )
+     search = GridSearchCV(clf, param_dict)
+     with pytest.raises(ValueError, match=error_msg):
+         search.fit(X, y)
+
+     param_dict = {"C": "1,2,3"}
+     clf = SVC(gamma="auto")
+     error_msg = re.escape(
+         "Parameter grid for parameter 'C' needs to be a list or a numpy array, "
+         "but got '1,2,3' (of type str) instead. Single values need to be "
+         "wrapped in a list with one element."
+     )
+     search = GridSearchCV(clf, param_dict)
+     with pytest.raises(TypeError, match=error_msg):
+         search.fit(X, y)
+
+     param_dict = {"C": np.ones((3, 2))}
+     clf = SVC()
+     search = GridSearchCV(clf, param_dict)
+     with pytest.raises(ValueError):
+         search.fit(X, y)
+
+
+ @pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
497
+ def test_grid_search_sparse(csr_container):
498
+ # Test that grid search works with both dense and sparse matrices
499
+ X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
500
+
501
+ clf = LinearSVC(dual="auto")
502
+ cv = GridSearchCV(clf, {"C": [0.1, 1.0]})
503
+ cv.fit(X_[:180], y_[:180])
504
+ y_pred = cv.predict(X_[180:])
505
+ C = cv.best_estimator_.C
506
+
507
+ X_ = csr_container(X_)
508
+ clf = LinearSVC(dual="auto")
509
+ cv = GridSearchCV(clf, {"C": [0.1, 1.0]})
510
+ cv.fit(X_[:180].tocoo(), y_[:180])
511
+ y_pred2 = cv.predict(X_[180:])
512
+ C2 = cv.best_estimator_.C
513
+
514
+ assert np.mean(y_pred == y_pred2) >= 0.9
515
+ assert C == C2
516
+
517
+
518
+ @pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
519
+ def test_grid_search_sparse_scoring(csr_container):
520
+ X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
521
+
522
+ clf = LinearSVC(dual="auto")
523
+ cv = GridSearchCV(clf, {"C": [0.1, 1.0]}, scoring="f1")
524
+ cv.fit(X_[:180], y_[:180])
525
+ y_pred = cv.predict(X_[180:])
526
+ C = cv.best_estimator_.C
527
+
528
+ X_ = csr_container(X_)
529
+ clf = LinearSVC(dual="auto")
530
+ cv = GridSearchCV(clf, {"C": [0.1, 1.0]}, scoring="f1")
531
+ cv.fit(X_[:180], y_[:180])
532
+ y_pred2 = cv.predict(X_[180:])
533
+ C2 = cv.best_estimator_.C
534
+
535
+ assert_array_equal(y_pred, y_pred2)
536
+ assert C == C2
537
+ # Smoke test the score
538
+ # np.testing.assert_allclose(f1_score(cv.predict(X_[:180]), y[:180]),
539
+ # cv.score(X_[:180], y[:180]))
540
+
541
+ # test loss where greater is worse
542
+ def f1_loss(y_true_, y_pred_):
543
+ return -f1_score(y_true_, y_pred_)
544
+
545
+ F1Loss = make_scorer(f1_loss, greater_is_better=False)
546
+ cv = GridSearchCV(clf, {"C": [0.1, 1.0]}, scoring=F1Loss)
547
+ cv.fit(X_[:180], y_[:180])
548
+ y_pred3 = cv.predict(X_[180:])
549
+ C3 = cv.best_estimator_.C
550
+
551
+ assert C == C3
552
+ assert_array_equal(y_pred, y_pred3)
553
+
554
+
555
+ def test_grid_search_precomputed_kernel():
556
+ # Test that grid search works when the input features are given in the
557
+ # form of a precomputed kernel matrix
558
+ X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
559
+
560
+ # compute the training kernel matrix corresponding to the linear kernel
561
+ K_train = np.dot(X_[:180], X_[:180].T)
562
+ y_train = y_[:180]
563
+
564
+ clf = SVC(kernel="precomputed")
565
+ cv = GridSearchCV(clf, {"C": [0.1, 1.0]})
566
+ cv.fit(K_train, y_train)
567
+
568
+ assert cv.best_score_ >= 0
569
+
570
+ # compute the test kernel matrix
571
+ K_test = np.dot(X_[180:], X_[:180].T)
572
+ y_test = y_[180:]
573
+
574
+ y_pred = cv.predict(K_test)
575
+
576
+ assert np.mean(y_pred == y_test) >= 0
577
+
578
+ # test error is raised when the precomputed kernel is not array-like
579
+ # or sparse
580
+ with pytest.raises(ValueError):
581
+ cv.fit(K_train.tolist(), y_train)
582
+
583
+
584
+ def test_grid_search_precomputed_kernel_error_nonsquare():
585
+ # Test that grid search returns an error with a non-square precomputed
586
+ # training kernel matrix
587
+ K_train = np.zeros((10, 20))
588
+ y_train = np.ones((10,))
589
+ clf = SVC(kernel="precomputed")
590
+ cv = GridSearchCV(clf, {"C": [0.1, 1.0]})
591
+ with pytest.raises(ValueError):
592
+ cv.fit(K_train, y_train)
593
+
594
+
595
+ class BrokenClassifier(BaseEstimator):
+     """Broken classifier that cannot be fit twice"""
+
+     def __init__(self, parameter=None):
+         self.parameter = parameter
+
+     def fit(self, X, y):
+         assert not hasattr(self, "has_been_fit_")
+         self.has_been_fit_ = True
+
+     def predict(self, X):
+         return np.zeros(X.shape[0])
+
+
+ @ignore_warnings
+ def test_refit():
+     # Regression test for bug in refitting
+     # Simulates re-fitting a broken estimator; this used to break with
+     # sparse SVMs.
+     X = np.arange(100).reshape(10, 10)
+     y = np.array([0] * 5 + [1] * 5)
+
+     clf = GridSearchCV(
+         BrokenClassifier(), [{"parameter": [0, 1]}], scoring="precision", refit=True
+     )
+     clf.fit(X, y)
+
+
+ def test_refit_callable():
+     """
+     Test refit=callable, which adds flexibility in identifying the
+     "best" estimator.
+     """
+
+     def refit_callable(cv_results):
+         """
+         A dummy function to test the `refit=callable` interface.
+         Return the index of the model that has the lowest
+         `mean_test_score`.
+         """
+         # Fit a dummy clf with `refit=True` to get a list of keys in
+         # clf.cv_results_.
+         X, y = make_classification(n_samples=100, n_features=4, random_state=42)
+         clf = GridSearchCV(
+             LinearSVC(dual="auto", random_state=42),
+             {"C": [0.01, 0.1, 1]},
+             scoring="precision",
+             refit=True,
+         )
+         clf.fit(X, y)
+         # Ensure that `best_index_ != 0` for this dummy clf
+         assert clf.best_index_ != 0
+
+         # Assert every key matches those in `cv_results`
+         for key in clf.cv_results_.keys():
+             assert key in cv_results
+
+         return cv_results["mean_test_score"].argmin()
+
+     X, y = make_classification(n_samples=100, n_features=4, random_state=42)
+     clf = GridSearchCV(
+         LinearSVC(dual="auto", random_state=42),
+         {"C": [0.01, 0.1, 1]},
+         scoring="precision",
+         refit=refit_callable,
+     )
+     clf.fit(X, y)
+
+     assert clf.best_index_ == 0
+     # Ensure `best_score_` is disabled when using `refit=callable`
+     assert not hasattr(clf, "best_score_")
+
+
+ def test_refit_callable_invalid_type():
+     """
+     Test that the implementation catches the error when 'best_index_'
+     returns an invalid result.
+     """
+
+     def refit_callable_invalid_type(cv_results):
+         """
+         A dummy function returning a non-integer 'best_index_'.
+         """
+         return None
+
+     X, y = make_classification(n_samples=100, n_features=4, random_state=42)
+
+     clf = GridSearchCV(
+         LinearSVC(dual="auto", random_state=42),
+         {"C": [0.1, 1]},
+         scoring="precision",
+         refit=refit_callable_invalid_type,
+     )
+     with pytest.raises(TypeError, match="best_index_ returned is not an integer"):
+         clf.fit(X, y)
+
+
+ @pytest.mark.parametrize("out_bound_value", [-1, 2])
+ @pytest.mark.parametrize("search_cv", [RandomizedSearchCV, GridSearchCV])
+ def test_refit_callable_out_bound(out_bound_value, search_cv):
+     """
+     Test that the implementation catches the error when 'best_index_'
+     returns an out-of-bounds result.
+     """
+
+     def refit_callable_out_bound(cv_results):
+         """
+         A dummy function returning an out-of-bounds 'best_index_'.
+         """
+         return out_bound_value
+
+     X, y = make_classification(n_samples=100, n_features=4, random_state=42)
+
+     clf = search_cv(
+         LinearSVC(dual="auto", random_state=42),
+         {"C": [0.1, 1]},
+         scoring="precision",
+         refit=refit_callable_out_bound,
+     )
+     with pytest.raises(IndexError, match="best_index_ index out of range"):
+         clf.fit(X, y)
+
+
+ def test_refit_callable_multi_metric():
+     """
+     Test refit=callable in a multiple metric evaluation setting
+     """
+
+     def refit_callable(cv_results):
+         """
+         A dummy function to test the `refit=callable` interface.
+         Return the index of the model that has the lowest
+         `mean_test_prec`.
+         """
+         assert "mean_test_prec" in cv_results
+         return cv_results["mean_test_prec"].argmin()
+
+     X, y = make_classification(n_samples=100, n_features=4, random_state=42)
+     scoring = {"Accuracy": make_scorer(accuracy_score), "prec": "precision"}
+     clf = GridSearchCV(
+         LinearSVC(dual="auto", random_state=42),
+         {"C": [0.01, 0.1, 1]},
+         scoring=scoring,
+         refit=refit_callable,
+     )
+     clf.fit(X, y)
+
+     assert clf.best_index_ == 0
+     # Ensure `best_score_` is disabled when using `refit=callable`
+     assert not hasattr(clf, "best_score_")
+
+
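+ # The four tests above pin down the `refit=callable` contract: the callable
+ # receives the `cv_results_` dict and must return an integer index into the
+ # list of candidates; a non-integer raises TypeError, an out-of-range index
+ # raises IndexError, and `best_score_` is deliberately not set. A minimal
+ # sketch of a custom strategy (hypothetical helper, not used by the tests):
+ #
+ #     def most_stable(cv_results):
+ #         # prefer the candidate whose test score varies least across folds
+ #         return int(cv_results["std_test_score"].argmin())
+
+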
+ def test_gridsearch_nd():
+     # Pass 4-dimensional X and 3-dimensional y to GridSearchCV
+     X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
+     y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
+
+     def check_X(x):
+         return x.shape[1:] == (5, 3, 2)
+
+     def check_y(x):
+         return x.shape[1:] == (7, 11)
+
+     clf = CheckingClassifier(
+         check_X=check_X,
+         check_y=check_y,
+         methods_to_check=["fit"],
+     )
+     grid_search = GridSearchCV(clf, {"foo_param": [1, 2, 3]})
+     grid_search.fit(X_4d, y_3d).score(X, y)
+     assert hasattr(grid_search, "cv_results_")
+
+
+ def test_X_as_list():
+     # Pass X as list in GridSearchCV
+     X = np.arange(100).reshape(10, 10)
+     y = np.array([0] * 5 + [1] * 5)
+
+     clf = CheckingClassifier(
+         check_X=lambda x: isinstance(x, list),
+         methods_to_check=["fit"],
+     )
+     cv = KFold(n_splits=3)
+     grid_search = GridSearchCV(clf, {"foo_param": [1, 2, 3]}, cv=cv)
+     grid_search.fit(X.tolist(), y).score(X, y)
+     assert hasattr(grid_search, "cv_results_")
+
+
+ def test_y_as_list():
+     # Pass y as list in GridSearchCV
+     X = np.arange(100).reshape(10, 10)
+     y = np.array([0] * 5 + [1] * 5)
+
+     clf = CheckingClassifier(
+         check_y=lambda x: isinstance(x, list),
+         methods_to_check=["fit"],
+     )
+     cv = KFold(n_splits=3)
+     grid_search = GridSearchCV(clf, {"foo_param": [1, 2, 3]}, cv=cv)
+     grid_search.fit(X, y.tolist()).score(X, y)
+     assert hasattr(grid_search, "cv_results_")
+
+
+ @ignore_warnings
+ def test_pandas_input():
+     # check that grid search does not destroy the pandas dataframe / series input
+     types = [(MockDataFrame, MockDataFrame)]
+     try:
+         from pandas import DataFrame, Series
+
+         types.append((DataFrame, Series))
+     except ImportError:
+         pass
+
+     X = np.arange(100).reshape(10, 10)
+     y = np.array([0] * 5 + [1] * 5)
+
+     for InputFeatureType, TargetType in types:
+         # X dataframe, y series
+         X_df, y_ser = InputFeatureType(X), TargetType(y)
+
+         def check_df(x):
+             return isinstance(x, InputFeatureType)
+
+         def check_series(x):
+             return isinstance(x, TargetType)
+
+         clf = CheckingClassifier(check_X=check_df, check_y=check_series)
+
+         grid_search = GridSearchCV(clf, {"foo_param": [1, 2, 3]})
+         grid_search.fit(X_df, y_ser).score(X_df, y_ser)
+         grid_search.predict(X_df)
+         assert hasattr(grid_search, "cv_results_")
+
+
+ def test_unsupervised_grid_search():
+     # test grid-search with unsupervised estimator
+     X, y = make_blobs(n_samples=50, random_state=0)
+     km = KMeans(random_state=0, init="random", n_init=1)
+
+     # Multi-metric evaluation unsupervised
+     scoring = ["adjusted_rand_score", "fowlkes_mallows_score"]
+     for refit in ["adjusted_rand_score", "fowlkes_mallows_score"]:
+         grid_search = GridSearchCV(
+             km, param_grid=dict(n_clusters=[2, 3, 4]), scoring=scoring, refit=refit
+         )
+         grid_search.fit(X, y)
+         # Both ARI and FMS can find the right number :)
+         assert grid_search.best_params_["n_clusters"] == 3
+
+     # Single metric evaluation unsupervised
+     grid_search = GridSearchCV(
+         km, param_grid=dict(n_clusters=[2, 3, 4]), scoring="fowlkes_mallows_score"
+     )
+     grid_search.fit(X, y)
+     assert grid_search.best_params_["n_clusters"] == 3
+
+     # Now without a score, and without y
+     grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]))
+     grid_search.fit(X)
+     assert grid_search.best_params_["n_clusters"] == 4
+
+
+ def test_gridsearch_no_predict():
+     # test grid-search with an estimator without predict.
+     # slight duplication of a test from KDE
+     def custom_scoring(estimator, X):
+         return 42 if estimator.bandwidth == 0.1 else 0
+
+     X, _ = make_blobs(cluster_std=0.1, random_state=1, centers=[[0, 1], [1, 0], [0, 0]])
+     search = GridSearchCV(
+         KernelDensity(),
+         param_grid=dict(bandwidth=[0.01, 0.1, 1]),
+         scoring=custom_scoring,
+     )
+     search.fit(X)
+     assert search.best_params_["bandwidth"] == 0.1
+     assert search.best_score_ == 42
+
+
+ def test_param_sampler():
+     # test basic properties of param sampler
+     param_distributions = {"kernel": ["rbf", "linear"], "C": uniform(0, 1)}
+     sampler = ParameterSampler(
+         param_distributions=param_distributions, n_iter=10, random_state=0
+     )
+     samples = [x for x in sampler]
+     assert len(samples) == 10
+     for sample in samples:
+         assert sample["kernel"] in ["rbf", "linear"]
+         assert 0 <= sample["C"] <= 1
+
+     # test that repeated calls yield identical parameters
+     param_distributions = {"C": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]}
+     sampler = ParameterSampler(
+         param_distributions=param_distributions, n_iter=3, random_state=0
+     )
+     assert [x for x in sampler] == [x for x in sampler]
+
+     param_distributions = {"C": uniform(0, 1)}
+     sampler = ParameterSampler(
+         param_distributions=param_distributions, n_iter=10, random_state=0
+     )
+     assert [x for x in sampler] == [x for x in sampler]
+
+
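+ # ParameterSampler treats list values as discrete sets to sample from and
+ # scipy frozen distributions (anything exposing an `rvs` method, such as
+ # `uniform(0, 1)` above) as distributions to draw from; with a fixed
+ # `random_state`, iterating the sampler twice yields the exact same sequence
+ # of candidates, as asserted above.
+
+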
+ def check_cv_results_array_types(search, param_keys, score_keys):
+     # Check that the arrays in the search's `cv_results_` have the correct types
+     cv_results = search.cv_results_
+     assert all(isinstance(cv_results[param], np.ma.MaskedArray) for param in param_keys)
+     assert all(cv_results[key].dtype == object for key in param_keys)
+     assert not any(isinstance(cv_results[key], np.ma.MaskedArray) for key in score_keys)
+     assert all(
+         cv_results[key].dtype == np.float64
+         for key in score_keys
+         if not key.startswith("rank")
+     )
+
+     scorer_keys = search.scorer_.keys() if search.multimetric_ else ["score"]
+
+     for key in scorer_keys:
+         assert cv_results["rank_test_%s" % key].dtype == np.int32
+
+
+ def check_cv_results_keys(cv_results, param_keys, score_keys, n_cand, extra_keys=()):
+     # Test the search.cv_results_ contains all the required results
+     all_keys = param_keys + score_keys + extra_keys
+     assert_array_equal(sorted(cv_results.keys()), sorted(all_keys + ("params",)))
+     assert all(cv_results[key].shape == (n_cand,) for key in param_keys + score_keys)
+
+
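+ # These two helpers document the cv_results_ layout: every key maps to an
+ # array of length n_candidates; `param_*` entries are object-dtype masked
+ # arrays (masked where a parameter does not apply to a candidate), numeric
+ # score/time entries are float64, `rank_test_*` is int32, and a `params` key
+ # holds the list of candidate parameter dicts.
+
+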
+ def test_grid_search_cv_results():
+     X, y = make_classification(n_samples=50, n_features=4, random_state=42)
+
+     n_grid_points = 6
+     params = [
+         dict(
+             kernel=[
+                 "rbf",
+             ],
+             C=[1, 10],
+             gamma=[0.1, 1],
+         ),
+         dict(
+             kernel=[
+                 "poly",
+             ],
+             degree=[1, 2],
+         ),
+     ]
+
+     param_keys = ("param_C", "param_degree", "param_gamma", "param_kernel")
+     score_keys = (
+         "mean_test_score",
+         "mean_train_score",
+         "rank_test_score",
+         "split0_test_score",
+         "split1_test_score",
+         "split2_test_score",
+         "split0_train_score",
+         "split1_train_score",
+         "split2_train_score",
+         "std_test_score",
+         "std_train_score",
+         "mean_fit_time",
+         "std_fit_time",
+         "mean_score_time",
+         "std_score_time",
+     )
+     n_candidates = n_grid_points
+
+     search = GridSearchCV(SVC(), cv=3, param_grid=params, return_train_score=True)
+     search.fit(X, y)
+     cv_results = search.cv_results_
+     # Check if score and timing are reasonable. Note that a bare
+     # `assert <generator expression>` is always truthy, so the generators
+     # must be consumed with all() for the per-key checks to actually run.
+     assert all(cv_results["rank_test_score"] >= 1)
+     assert all(
+         all(cv_results[k] >= 0) for k in score_keys if k != "rank_test_score"
+     )
+     assert all(
+         all(cv_results[k] <= 1)
+         for k in score_keys
+         if "time" not in k and k != "rank_test_score"
+     )
+     # Check cv_results structure
+     check_cv_results_array_types(search, param_keys, score_keys)
+     check_cv_results_keys(cv_results, param_keys, score_keys, n_candidates)
+     # Check masking
+     cv_results = search.cv_results_
+
+     poly_results = [
+         (
+             cv_results["param_C"].mask[i]
+             and cv_results["param_gamma"].mask[i]
+             and not cv_results["param_degree"].mask[i]
+         )
+         for i in range(n_candidates)
+         if cv_results["param_kernel"][i] == "poly"
+     ]
+     assert all(poly_results)
+     assert len(poly_results) == 2
+
+     rbf_results = [
+         (
+             not cv_results["param_C"].mask[i]
+             and not cv_results["param_gamma"].mask[i]
+             and cv_results["param_degree"].mask[i]
+         )
+         for i in range(n_candidates)
+         if cv_results["param_kernel"][i] == "rbf"
+     ]
+     assert all(rbf_results)
+     assert len(rbf_results) == 4
+
+
+ def test_random_search_cv_results():
+     X, y = make_classification(n_samples=50, n_features=4, random_state=42)
+
+     n_search_iter = 30
+
+     params = [
+         {"kernel": ["rbf"], "C": expon(scale=10), "gamma": expon(scale=0.1)},
+         {"kernel": ["poly"], "degree": [2, 3]},
+     ]
+     param_keys = ("param_C", "param_degree", "param_gamma", "param_kernel")
+     score_keys = (
+         "mean_test_score",
+         "mean_train_score",
+         "rank_test_score",
+         "split0_test_score",
+         "split1_test_score",
+         "split2_test_score",
+         "split0_train_score",
+         "split1_train_score",
+         "split2_train_score",
+         "std_test_score",
+         "std_train_score",
+         "mean_fit_time",
+         "std_fit_time",
+         "mean_score_time",
+         "std_score_time",
+     )
+     n_candidates = n_search_iter
+
+     search = RandomizedSearchCV(
+         SVC(),
+         n_iter=n_search_iter,
+         cv=3,
+         param_distributions=params,
+         return_train_score=True,
+     )
+     search.fit(X, y)
+     cv_results = search.cv_results_
+     # Check results structure
+     check_cv_results_array_types(search, param_keys, score_keys)
+     check_cv_results_keys(cv_results, param_keys, score_keys, n_candidates)
+     assert all(
+         (
+             cv_results["param_C"].mask[i]
+             and cv_results["param_gamma"].mask[i]
+             and not cv_results["param_degree"].mask[i]
+         )
+         for i in range(n_candidates)
+         if cv_results["param_kernel"][i] == "poly"
+     )
+     assert all(
+         (
+             not cv_results["param_C"].mask[i]
+             and not cv_results["param_gamma"].mask[i]
+             and cv_results["param_degree"].mask[i]
+         )
+         for i in range(n_candidates)
+         if cv_results["param_kernel"][i] == "rbf"
+     )
+
+
+ @pytest.mark.parametrize(
+     "SearchCV, specialized_params",
+     [
+         (GridSearchCV, {"param_grid": {"C": [1, 10]}}),
+         (RandomizedSearchCV, {"param_distributions": {"C": [1, 10]}, "n_iter": 2}),
+     ],
+ )
+ def test_search_default_iid(SearchCV, specialized_params):
+     # Check that the mean/std of the cv scores are computed as plain,
+     # unweighted aggregates over the splits, even when the splits have
+     # different sizes (i.e. no i.i.d.-style weighting by test-fold size).
+     # noise-free simple 2d-data
+     X, y = make_blobs(
+         centers=[[0, 0], [1, 0], [0, 1], [1, 1]],
+         random_state=0,
+         cluster_std=0.1,
+         shuffle=False,
+         n_samples=80,
+     )
+     # split dataset into two folds that are not iid
+     # first one contains data of all 4 blobs, second only from two.
+     mask = np.ones(X.shape[0], dtype=bool)
+     mask[np.where(y == 1)[0][::2]] = 0
+     mask[np.where(y == 2)[0][::2]] = 0
+     # this leads to perfect classification on one fold and a score of 1/3 on
+     # the other
+     # create "cv" for splits
+     cv = [[mask, ~mask], [~mask, mask]]
+
+     common_params = {"estimator": SVC(), "cv": cv, "return_train_score": True}
+     search = SearchCV(**common_params, **specialized_params)
+     search.fit(X, y)
+
+     test_cv_scores = np.array(
+         [
+             search.cv_results_["split%d_test_score" % s][0]
+             for s in range(search.n_splits_)
+         ]
+     )
+     test_mean = search.cv_results_["mean_test_score"][0]
+     test_std = search.cv_results_["std_test_score"][0]
+
+     train_cv_scores = np.array(
+         [
+             search.cv_results_["split%d_train_score" % s][0]
+             for s in range(search.n_splits_)
+         ]
+     )
+     train_mean = search.cv_results_["mean_train_score"][0]
+     train_std = search.cv_results_["std_train_score"][0]
+
+     assert search.cv_results_["param_C"][0] == 1
+     # scores are the same as above
+     assert_allclose(test_cv_scores, [1, 1.0 / 3.0])
+     assert_allclose(train_cv_scores, [1, 1])
+     # Unweighted mean/std is used
+     assert test_mean == pytest.approx(np.mean(test_cv_scores))
+     assert test_std == pytest.approx(np.std(test_cv_scores))
+
+     # For the train scores, we do not take a weighted mean irrespective of
+     # i.i.d. or not
+     assert train_mean == pytest.approx(1)
+     assert train_std == pytest.approx(0)
+
+
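+ # Quick arithmetic check of the expected aggregates above: with two split
+ # scores of 1 and 1/3, np.mean gives (1 + 1/3) / 2 = 2/3 and np.std gives
+ # |1 - 1/3| / 2 = 1/3, which is what `mean_test_score` / `std_test_score`
+ # must contain since no fold-size weighting is applied.
+
+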
+ def test_grid_search_cv_results_multimetric():
+     X, y = make_classification(n_samples=50, n_features=4, random_state=42)
+
+     n_splits = 3
+     params = [
+         dict(
+             kernel=[
+                 "rbf",
+             ],
+             C=[1, 10],
+             gamma=[0.1, 1],
+         ),
+         dict(
+             kernel=[
+                 "poly",
+             ],
+             degree=[1, 2],
+         ),
+     ]
+
+     grid_searches = []
+     for scoring in (
+         {"accuracy": make_scorer(accuracy_score), "recall": make_scorer(recall_score)},
+         "accuracy",
+         "recall",
+     ):
+         grid_search = GridSearchCV(
+             SVC(), cv=n_splits, param_grid=params, scoring=scoring, refit=False
+         )
+         grid_search.fit(X, y)
+         grid_searches.append(grid_search)
+
+     compare_cv_results_multimetric_with_single(*grid_searches)
+
+
+ def test_random_search_cv_results_multimetric():
+     X, y = make_classification(n_samples=50, n_features=4, random_state=42)
+
+     n_splits = 3
+     n_search_iter = 30
+
+     # Scipy 0.12's stats dists do not accept seed, hence we use param grid
+     params = dict(C=np.logspace(-4, 1, 3), gamma=np.logspace(-5, 0, 3, base=0.1))
+     for refit in (True, False):
+         random_searches = []
+         for scoring in (("accuracy", "recall"), "accuracy", "recall"):
+             # If True, for multi-metric pass refit='accuracy'
+             if refit:
+                 probability = True
+                 refit = "accuracy" if isinstance(scoring, tuple) else refit
+             else:
+                 probability = False
+             clf = SVC(probability=probability, random_state=42)
+             random_search = RandomizedSearchCV(
+                 clf,
+                 n_iter=n_search_iter,
+                 cv=n_splits,
+                 param_distributions=params,
+                 scoring=scoring,
+                 refit=refit,
+                 random_state=0,
+             )
+             random_search.fit(X, y)
+             random_searches.append(random_search)
+
+         compare_cv_results_multimetric_with_single(*random_searches)
+         compare_refit_methods_when_refit_with_acc(
+             random_searches[0], random_searches[1], refit
+         )
+
+
+ def compare_cv_results_multimetric_with_single(search_multi, search_acc, search_rec):
+     """Compare multi-metric cv_results with the ensemble of multiple
+     single metric cv_results from single metric grid/random search"""
+
+     assert search_multi.multimetric_
+     assert_array_equal(sorted(search_multi.scorer_), ("accuracy", "recall"))
+
+     cv_results_multi = search_multi.cv_results_
+     cv_results_acc_rec = {
+         re.sub("_score$", "_accuracy", k): v for k, v in search_acc.cv_results_.items()
+     }
+     cv_results_acc_rec.update(
+         {re.sub("_score$", "_recall", k): v for k, v in search_rec.cv_results_.items()}
+     )
+
+     # Check if score and timing are reasonable, also checks if the keys
+     # are present
+     assert all(
+         (
+             np.all(cv_results_multi[k] <= 1)
+             for k in (
+                 "mean_score_time",
+                 "std_score_time",
+                 "mean_fit_time",
+                 "std_fit_time",
+             )
+         )
+     )
+
+     # Compare the keys, other than time keys, among multi-metric and
+     # single metric grid search results. np.testing.assert_equal performs a
+     # deep nested comparison of the two cv_results dicts
+     np.testing.assert_equal(
+         {k: v for k, v in cv_results_multi.items() if not k.endswith("_time")},
+         {k: v for k, v in cv_results_acc_rec.items() if not k.endswith("_time")},
+     )
+
+
+ def compare_refit_methods_when_refit_with_acc(search_multi, search_acc, refit):
+     """Compare refit multi-metric search methods with single metric methods"""
+     assert search_acc.refit == refit
+     if refit:
+         assert search_multi.refit == "accuracy"
+     else:
+         assert not search_multi.refit
+         return  # search cannot predict/score without refit
+
+     X, y = make_blobs(n_samples=100, n_features=4, random_state=42)
+     for method in ("predict", "predict_proba", "predict_log_proba"):
+         assert_almost_equal(
+             getattr(search_multi, method)(X), getattr(search_acc, method)(X)
+         )
+     assert_almost_equal(search_multi.score(X, y), search_acc.score(X, y))
+     for key in ("best_index_", "best_score_", "best_params_"):
+         assert getattr(search_multi, key) == getattr(search_acc, key)
+
+
+ @pytest.mark.parametrize(
1261
+ "search_cv",
1262
+ [
1263
+ RandomizedSearchCV(
1264
+ estimator=DecisionTreeClassifier(),
1265
+ param_distributions={"max_depth": [5, 10]},
1266
+ ),
1267
+ GridSearchCV(
1268
+ estimator=DecisionTreeClassifier(), param_grid={"max_depth": [5, 10]}
1269
+ ),
1270
+ ],
1271
+ )
1272
+ def test_search_cv_score_samples_error(search_cv):
1273
+ X, y = make_blobs(n_samples=100, n_features=4, random_state=42)
1274
+ search_cv.fit(X, y)
1275
+
1276
+ # Make sure to error out when underlying estimator does not implement
1277
+ # the method `score_samples`
1278
+ outer_msg = f"'{search_cv.__class__.__name__}' has no attribute 'score_samples'"
1279
+ inner_msg = "'DecisionTreeClassifier' object has no attribute 'score_samples'"
1280
+
1281
+ with pytest.raises(AttributeError, match=outer_msg) as exec_info:
1282
+ search_cv.score_samples(X)
1283
+ assert isinstance(exec_info.value.__cause__, AttributeError)
1284
+ assert inner_msg == str(exec_info.value.__cause__)
1285
+
1286
+
1287
+ @pytest.mark.parametrize(
1288
+ "search_cv",
1289
+ [
1290
+ RandomizedSearchCV(
1291
+ estimator=LocalOutlierFactor(novelty=True),
1292
+ param_distributions={"n_neighbors": [5, 10]},
1293
+ scoring="precision",
1294
+ ),
1295
+ GridSearchCV(
1296
+ estimator=LocalOutlierFactor(novelty=True),
1297
+ param_grid={"n_neighbors": [5, 10]},
1298
+ scoring="precision",
1299
+ ),
1300
+ ],
1301
+ )
1302
+ def test_search_cv_score_samples_method(search_cv):
1303
+ # Set parameters
1304
+ rng = np.random.RandomState(42)
1305
+ n_samples = 300
1306
+ outliers_fraction = 0.15
1307
+ n_outliers = int(outliers_fraction * n_samples)
1308
+ n_inliers = n_samples - n_outliers
1309
+
1310
+ # Create dataset
1311
+ X = make_blobs(
1312
+ n_samples=n_inliers,
1313
+ n_features=2,
1314
+ centers=[[0, 0], [0, 0]],
1315
+ cluster_std=0.5,
1316
+ random_state=0,
1317
+ )[0]
1318
+ # Add some noisy points
1319
+ X = np.concatenate([X, rng.uniform(low=-6, high=6, size=(n_outliers, 2))], axis=0)
1320
+
1321
+ # Define labels to be able to score the estimator with `search_cv`
1322
+ y_true = np.array([1] * n_samples)
1323
+ y_true[-n_outliers:] = -1
1324
+
1325
+ # Fit on data
1326
+ search_cv.fit(X, y_true)
1327
+
1328
+ # Verify that the stand alone estimator yields the same results
1329
+ # as the ones obtained with *SearchCV
1330
+ assert_allclose(
1331
+ search_cv.score_samples(X), search_cv.best_estimator_.score_samples(X)
1332
+ )
1333
+
1334
+
1335
+ def test_search_cv_results_rank_tie_breaking():
1336
+ X, y = make_blobs(n_samples=50, random_state=42)
1337
+
1338
+ # The two C values are close enough to give similar models
1339
+ # which would result in a tie of their mean cv-scores
1340
+ param_grid = {"C": [1, 1.001, 0.001]}
1341
+
1342
+ grid_search = GridSearchCV(SVC(), param_grid=param_grid, return_train_score=True)
1343
+ random_search = RandomizedSearchCV(
1344
+ SVC(), n_iter=3, param_distributions=param_grid, return_train_score=True
1345
+ )
1346
+
1347
+ for search in (grid_search, random_search):
1348
+ search.fit(X, y)
1349
+ cv_results = search.cv_results_
1350
+ # Check tie breaking strategy -
1351
+ # Check that there is a tie in the mean scores between
1352
+ # candidates 1 and 2 alone
1353
+ assert_almost_equal(
1354
+ cv_results["mean_test_score"][0], cv_results["mean_test_score"][1]
1355
+ )
1356
+ assert_almost_equal(
1357
+ cv_results["mean_train_score"][0], cv_results["mean_train_score"][1]
1358
+ )
1359
+ assert not np.allclose(
1360
+ cv_results["mean_test_score"][1], cv_results["mean_test_score"][2]
1361
+ )
1362
+ assert not np.allclose(
1363
+ cv_results["mean_train_score"][1], cv_results["mean_train_score"][2]
1364
+ )
1365
+ # 'min' rank should be assigned to the tied candidates
1366
+ assert_almost_equal(search.cv_results_["rank_test_score"], [1, 1, 3])
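+         # (Editorial aside, not part of the committed test.) The 'min' rank
+         # assertion above matches scipy.stats.rankdata applied to the negated
+         # mean scores, assuming sklearn keeps using rankdata(method="min"):
+         #
+         #   from scipy.stats import rankdata
+         #   rankdata([-0.9, -0.9, -0.5], method="min")  # -> array([1., 1., 3.])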
+
+
+ def test_search_cv_results_none_param():
+     X, y = [[1], [2], [3], [4], [5]], [0, 0, 0, 0, 1]
+     estimators = (DecisionTreeRegressor(), DecisionTreeClassifier())
+     est_parameters = {"random_state": [0, None]}
+     cv = KFold()
+
+     for est in estimators:
+         grid_search = GridSearchCV(
+             est,
+             est_parameters,
+             cv=cv,
+         ).fit(X, y)
+         assert_array_equal(grid_search.cv_results_["param_random_state"], [0, None])
+
+
+ @ignore_warnings()
+ def test_search_cv_timing():
+     svc = LinearSVC(dual="auto", random_state=0)
+
+     X = [
+         [
+             1,
+         ],
+         [
+             2,
+         ],
+         [
+             3,
+         ],
+         [
+             4,
+         ],
+     ]
+     y = [0, 1, 1, 0]
+
+     gs = GridSearchCV(svc, {"C": [0, 1]}, cv=2, error_score=0)
+     rs = RandomizedSearchCV(svc, {"C": [0, 1]}, cv=2, error_score=0, n_iter=2)
+
+     for search in (gs, rs):
+         search.fit(X, y)
+         for key in ["mean_fit_time", "std_fit_time"]:
+             # NOTE: The precision of time.time on Windows is not high
+             # enough for the fit/score times to be non-zero for trivial X and y
+             assert np.all(search.cv_results_[key] >= 0)
+             assert np.all(search.cv_results_[key] < 1)
+
+         for key in ["mean_score_time", "std_score_time"]:
+             assert search.cv_results_[key][1] >= 0
+             assert search.cv_results_[key][0] == 0.0
+             assert np.all(search.cv_results_[key] < 1)
+
+         assert hasattr(search, "refit_time_")
+         assert isinstance(search.refit_time_, float)
+         assert search.refit_time_ >= 0
+
+
+ def test_grid_search_correct_score_results():
+     # test that the correct scores are used
+     n_splits = 3
+     clf = LinearSVC(dual="auto", random_state=0)
+     X, y = make_blobs(random_state=0, centers=2)
+     Cs = [0.1, 1, 10]
+     for score in ["f1", "roc_auc"]:
+         grid_search = GridSearchCV(clf, {"C": Cs}, scoring=score, cv=n_splits)
+         cv_results = grid_search.fit(X, y).cv_results_
+
+         # Test scorer names
+         result_keys = list(cv_results.keys())
+         expected_keys = ("mean_test_score", "rank_test_score") + tuple(
+             "split%d_test_score" % cv_i for cv_i in range(n_splits)
+         )
+         assert all(np.isin(expected_keys, result_keys))
+
+         cv = StratifiedKFold(n_splits=n_splits)
+         n_splits = grid_search.n_splits_
+         for candidate_i, C in enumerate(Cs):
+             clf.set_params(C=C)
+             cv_scores = np.array(
+                 [
+                     grid_search.cv_results_["split%d_test_score" % s][candidate_i]
+                     for s in range(n_splits)
+                 ]
+             )
+             for i, (train, test) in enumerate(cv.split(X, y)):
+                 clf.fit(X[train], y[train])
+                 if score == "f1":
+                     correct_score = f1_score(y[test], clf.predict(X[test]))
+                 elif score == "roc_auc":
+                     dec = clf.decision_function(X[test])
+                     correct_score = roc_auc_score(y[test], dec)
+                 assert_almost_equal(correct_score, cv_scores[i])
+
+
+ def test_pickle():
+     # Test that a fitted search can be pickled
+     clf = MockClassifier()
+     grid_search = GridSearchCV(clf, {"foo_param": [1, 2, 3]}, refit=True, cv=3)
+     grid_search.fit(X, y)
+     grid_search_pickled = pickle.loads(pickle.dumps(grid_search))
+     assert_array_almost_equal(grid_search.predict(X), grid_search_pickled.predict(X))
+
+     random_search = RandomizedSearchCV(
+         clf, {"foo_param": [1, 2, 3]}, refit=True, n_iter=3, cv=3
+     )
+     random_search.fit(X, y)
+     random_search_pickled = pickle.loads(pickle.dumps(random_search))
+     assert_array_almost_equal(
+         random_search.predict(X), random_search_pickled.predict(X)
+     )
+
+
+ def test_grid_search_with_multioutput_data():
+     # Test search with multi-output estimators
+
+     X, y = make_multilabel_classification(return_indicator=True, random_state=0)
+
+     est_parameters = {"max_depth": [1, 2, 3, 4]}
+     cv = KFold()
+
+     estimators = [
+         DecisionTreeRegressor(random_state=0),
+         DecisionTreeClassifier(random_state=0),
+     ]
+
+     # Test with grid search cv
+     for est in estimators:
+         grid_search = GridSearchCV(est, est_parameters, cv=cv)
+         grid_search.fit(X, y)
+         res_params = grid_search.cv_results_["params"]
+         for cand_i in range(len(res_params)):
+             est.set_params(**res_params[cand_i])
+
+             for i, (train, test) in enumerate(cv.split(X, y)):
+                 est.fit(X[train], y[train])
+                 correct_score = est.score(X[test], y[test])
+                 assert_almost_equal(
+                     correct_score,
+                     grid_search.cv_results_["split%d_test_score" % i][cand_i],
+                 )
+
+     # Test with a randomized search
+     for est in estimators:
+         random_search = RandomizedSearchCV(est, est_parameters, cv=cv, n_iter=3)
+         random_search.fit(X, y)
+         res_params = random_search.cv_results_["params"]
+         for cand_i in range(len(res_params)):
+             est.set_params(**res_params[cand_i])
+
+             for i, (train, test) in enumerate(cv.split(X, y)):
+                 est.fit(X[train], y[train])
+                 correct_score = est.score(X[test], y[test])
+                 assert_almost_equal(
+                     correct_score,
+                     random_search.cv_results_["split%d_test_score" % i][cand_i],
+                 )
+
+
+ def test_predict_proba_disabled():
+     # Test predict_proba when disabled on the estimator.
+     X = np.arange(20).reshape(5, -1)
+     y = [0, 0, 1, 1, 1]
+     clf = SVC(probability=False)
+     gs = GridSearchCV(clf, {}, cv=2).fit(X, y)
+     assert not hasattr(gs, "predict_proba")
+
+
+ def test_grid_search_allows_nans():
+     # Test GridSearchCV with SimpleImputer
+     X = np.arange(20, dtype=np.float64).reshape(5, -1)
+     X[2, :] = np.nan
+     y = [0, 0, 1, 1, 1]
+     p = Pipeline(
+         [
+             ("imputer", SimpleImputer(strategy="mean", missing_values=np.nan)),
+             ("classifier", MockClassifier()),
+         ]
+     )
+     GridSearchCV(p, {"classifier__foo_param": [1, 2, 3]}, cv=2).fit(X, y)
+
+
+ class FailingClassifier(BaseEstimator):
+     """Classifier that raises a ValueError on fit()"""
+
+     FAILING_PARAMETER = 2
+
+     def __init__(self, parameter=None):
+         self.parameter = parameter
+
+     def fit(self, X, y=None):
+         if self.parameter == FailingClassifier.FAILING_PARAMETER:
+             raise ValueError("Failing classifier failed as required")
+
+     def predict(self, X):
+         return np.zeros(X.shape[0])
+
+     def score(self, X=None, Y=None):
+         return 0.0
+
+
+ def test_grid_search_failing_classifier():
+     # GridSearchCV with error_score != 'raise'
+     # Ensures that a warning is raised and the score is reset where appropriate.
+
+     X, y = make_classification(n_samples=20, n_features=10, random_state=0)
+
+     clf = FailingClassifier()
+
+     # refit=False because we only want to check that errors caused by fits
+     # to individual folds will be caught and warnings raised instead. If
+     # refit was done, then an exception would be raised on refit and not
+     # caught by grid_search (expected behavior), and this would cause an
+     # error in this test.
+     gs = GridSearchCV(
+         clf,
+         [{"parameter": [0, 1, 2]}],
+         scoring="accuracy",
+         refit=False,
+         error_score=0.0,
+     )
+
+     warning_message = re.compile(
+         "5 fits failed.+total of 15.+The score on these"
+         r" train-test partitions for these parameters will be set to 0\.0.+"
+         "5 fits failed with the following error.+ValueError.+Failing classifier failed"
+         " as required",
+         flags=re.DOTALL,
+     )
+     with pytest.warns(FitFailedWarning, match=warning_message):
+         gs.fit(X, y)
+     n_candidates = len(gs.cv_results_["params"])
+
+     # Ensure that grid scores were set to zero as required for those fits
+     # that are expected to fail.
+     def get_cand_scores(i):
+         return np.array(
+             [gs.cv_results_["split%d_test_score" % s][i] for s in range(gs.n_splits_)]
+         )
+
+     assert all(
+         (
+             np.all(get_cand_scores(cand_i) == 0.0)
+             for cand_i in range(n_candidates)
+             if gs.cv_results_["param_parameter"][cand_i]
+             == FailingClassifier.FAILING_PARAMETER
+         )
+     )
+
+     gs = GridSearchCV(
+         clf,
+         [{"parameter": [0, 1, 2]}],
+         scoring="accuracy",
+         refit=False,
+         error_score=float("nan"),
+     )
+     warning_message = re.compile(
+         "5 fits failed.+total of 15.+The score on these"
+         r" train-test partitions for these parameters will be set to nan.+"
+         "5 fits failed with the following error.+ValueError.+Failing classifier failed"
+         " as required",
+         flags=re.DOTALL,
+     )
+     with pytest.warns(FitFailedWarning, match=warning_message):
+         gs.fit(X, y)
+     n_candidates = len(gs.cv_results_["params"])
+     assert all(
+         np.all(np.isnan(get_cand_scores(cand_i)))
+         for cand_i in range(n_candidates)
+         if gs.cv_results_["param_parameter"][cand_i]
+         == FailingClassifier.FAILING_PARAMETER
+     )
+
+     ranks = gs.cv_results_["rank_test_score"]
+
+     # Check that the estimators that succeeded have lower ranks
+     assert ranks[0] <= 2 and ranks[1] <= 2
+     # Check that the failed estimator has the highest rank
+     assert ranks[clf.FAILING_PARAMETER] == 3
+     assert gs.best_index_ != clf.FAILING_PARAMETER
+
+
+ def test_grid_search_classifier_all_fits_fail():
+     X, y = make_classification(n_samples=20, n_features=10, random_state=0)
+
+     clf = FailingClassifier()
+
+     gs = GridSearchCV(
+         clf,
+         [{"parameter": [FailingClassifier.FAILING_PARAMETER] * 3}],
+         error_score=0.0,
+     )
+
+     warning_message = re.compile(
+         (
+             "All the 15 fits failed.+15 fits failed with the following"
+             " error.+ValueError.+Failing classifier failed as required"
+         ),
+         flags=re.DOTALL,
+     )
+     with pytest.raises(ValueError, match=warning_message):
+         gs.fit(X, y)
+
+
+ def test_grid_search_failing_classifier_raise():
+     # GridSearchCV with error_score == 'raise' raises the error
+
+     X, y = make_classification(n_samples=20, n_features=10, random_state=0)
+
+     clf = FailingClassifier()
+
+     # refit=False because we want to test the behaviour of the grid search part
+     gs = GridSearchCV(
+         clf,
+         [{"parameter": [0, 1, 2]}],
+         scoring="accuracy",
+         refit=False,
+         error_score="raise",
+     )
+
+     # FailingClassifier issues a ValueError so this is what we look for.
+     with pytest.raises(ValueError):
+         gs.fit(X, y)
+
+
+ def test_parameters_sampler_replacement():
+     # raise a warning if n_iter is bigger than the total parameter space
+     params = [
+         {"first": [0, 1], "second": ["a", "b", "c"]},
+         {"third": ["two", "values"]},
+     ]
+     sampler = ParameterSampler(params, n_iter=9)
+     n_iter = 9
+     grid_size = 8
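+     # (Editorial note, not in the original test.) The total space is
+     # 2 * 3 = 6 combinations from the first dict plus 2 from the second,
+     # hence grid_size = 8.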
+     expected_warning = (
+         "The total space of parameters %d is smaller "
+         "than n_iter=%d. Running %d iterations. For "
+         "exhaustive searches, use GridSearchCV." % (grid_size, n_iter, grid_size)
+     )
+     with pytest.warns(UserWarning, match=expected_warning):
+         list(sampler)
+
+     # degenerates to GridSearchCV if n_iter is the same as grid_size
+     sampler = ParameterSampler(params, n_iter=8)
+     samples = list(sampler)
+     assert len(samples) == 8
+     for values in ParameterGrid(params):
+         assert values in samples
+     assert len(ParameterSampler(params, n_iter=1000)) == 8
+
+     # test sampling without replacement in a large grid
+     params = {"a": range(10), "b": range(10), "c": range(10)}
+     sampler = ParameterSampler(params, n_iter=99, random_state=42)
+     samples = list(sampler)
+     assert len(samples) == 99
+     hashable_samples = ["a%db%dc%d" % (p["a"], p["b"], p["c"]) for p in samples]
+     assert len(set(hashable_samples)) == 99
+
+     # doesn't go into infinite loops
+     params_distribution = {"first": bernoulli(0.5), "second": ["a", "b", "c"]}
+     sampler = ParameterSampler(params_distribution, n_iter=7)
+     samples = list(sampler)
+     assert len(samples) == 7
+
+
+ def test_stochastic_gradient_loss_param():
+     # Make sure predict_proba works when the loss is specified
+     # as one of the parameters in the param_grid.
+     param_grid = {
+         "loss": ["log_loss"],
+     }
+     X = np.arange(24).reshape(6, -1)
+     y = [0, 0, 0, 1, 1, 1]
+     clf = GridSearchCV(
+         estimator=SGDClassifier(loss="hinge"), param_grid=param_grid, cv=3
+     )
+
+     # When the estimator is not fitted, `predict_proba` is not available as the
+     # loss is 'hinge'.
+     assert not hasattr(clf, "predict_proba")
+     clf.fit(X, y)
+     clf.predict_proba(X)
+     clf.predict_log_proba(X)
+
+     # Make sure `predict_proba` is not available when setting loss=['hinge']
+     # in param_grid
+     param_grid = {
+         "loss": ["hinge"],
+     }
+     clf = GridSearchCV(
+         estimator=SGDClassifier(loss="hinge"), param_grid=param_grid, cv=3
+     )
+     assert not hasattr(clf, "predict_proba")
+     clf.fit(X, y)
+     assert not hasattr(clf, "predict_proba")
+
+
+ def test_search_train_scores_set_to_false():
+     X = np.arange(6).reshape(6, -1)
+     y = [0, 0, 0, 1, 1, 1]
+     clf = LinearSVC(dual="auto", random_state=0)
+
+     gs = GridSearchCV(clf, param_grid={"C": [0.1, 0.2]}, cv=3)
+     gs.fit(X, y)
+
+
+ def test_grid_search_cv_splits_consistency():
+     # Check that a one-time iterable is accepted as a cv parameter.
+     n_samples = 100
+     n_splits = 5
+     X, y = make_classification(n_samples=n_samples, random_state=0)
+
+     gs = GridSearchCV(
+         LinearSVC(dual="auto", random_state=0),
+         param_grid={"C": [0.1, 0.2, 0.3]},
+         cv=OneTimeSplitter(n_splits=n_splits, n_samples=n_samples),
+         return_train_score=True,
+     )
+     gs.fit(X, y)
+
+     gs2 = GridSearchCV(
+         LinearSVC(dual="auto", random_state=0),
+         param_grid={"C": [0.1, 0.2, 0.3]},
+         cv=KFold(n_splits=n_splits),
+         return_train_score=True,
+     )
+     gs2.fit(X, y)
+
+     # Give a generator as the cv parameter
+     assert isinstance(
+         KFold(n_splits=n_splits, shuffle=True, random_state=0).split(X, y),
+         GeneratorType,
+     )
+     gs3 = GridSearchCV(
+         LinearSVC(dual="auto", random_state=0),
+         param_grid={"C": [0.1, 0.2, 0.3]},
+         cv=KFold(n_splits=n_splits, shuffle=True, random_state=0).split(X, y),
+         return_train_score=True,
+     )
+     gs3.fit(X, y)
+
+     gs4 = GridSearchCV(
+         LinearSVC(dual="auto", random_state=0),
+         param_grid={"C": [0.1, 0.2, 0.3]},
+         cv=KFold(n_splits=n_splits, shuffle=True, random_state=0),
+         return_train_score=True,
+     )
+     gs4.fit(X, y)
+
+     def _pop_time_keys(cv_results):
+         for key in (
+             "mean_fit_time",
+             "std_fit_time",
+             "mean_score_time",
+             "std_score_time",
+         ):
+             cv_results.pop(key)
+         return cv_results
+
+     # Check that generators are supported as cv and
+     # that the splits are consistent
+     np.testing.assert_equal(
+         _pop_time_keys(gs3.cv_results_), _pop_time_keys(gs4.cv_results_)
+     )
+
+     # OneTimeSplitter is a non-re-entrant cv where split can be called only
+     # once. If ``cv.split`` were called once per param setting in
+     # GridSearchCV.fit, the 2nd and 3rd parameters would not be evaluated,
+     # since no train/test indices would be generated for the 2nd and
+     # subsequent cv.split calls. This is a check to make sure cv.split is
+     # not called once per param setting.
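+     # (Editorial aside, not part of the committed test: a minimal sketch of
+     # why a one-time iterable is exhausted after a single pass.)
+     #
+     #   splits = KFold(n_splits=5).split(X, y)  # a generator
+     #   sum(1 for _ in splits)                  # -> 5
+     #   sum(1 for _ in splits)                  # -> 0, nothing left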
+     np.testing.assert_equal(
+         {k: v for k, v in gs.cv_results_.items() if not k.endswith("_time")},
+         {k: v for k, v in gs2.cv_results_.items() if not k.endswith("_time")},
+     )
+
+     # Check consistency of folds across the parameters
+     gs = GridSearchCV(
+         LinearSVC(dual="auto", random_state=0),
+         param_grid={"C": [0.1, 0.1, 0.2, 0.2]},
+         cv=KFold(n_splits=n_splits, shuffle=True),
+         return_train_score=True,
+     )
+     gs.fit(X, y)
+
+     # As the first two param settings (C=0.1) and the next two param
+     # settings (C=0.2) are the same, the test and train scores must also be
+     # the same, as long as the same train/test indices are generated for all
+     # the cv splits for both param settings
+     for score_type in ("train", "test"):
+         per_param_scores = {}
+         for param_i in range(4):
+             per_param_scores[param_i] = [
+                 gs.cv_results_["split%d_%s_score" % (s, score_type)][param_i]
+                 for s in range(5)
+             ]
+
+         assert_array_almost_equal(per_param_scores[0], per_param_scores[1])
+         assert_array_almost_equal(per_param_scores[2], per_param_scores[3])
+
+
+ def test_transform_inverse_transform_round_trip():
+     clf = MockClassifier()
+     grid_search = GridSearchCV(clf, {"foo_param": [1, 2, 3]}, cv=3, verbose=3)
+
+     grid_search.fit(X, y)
+     X_round_trip = grid_search.inverse_transform(grid_search.transform(X))
+     assert_array_equal(X, X_round_trip)
+
+
+ def test_custom_run_search():
+     def check_results(results, gscv):
+         exp_results = gscv.cv_results_
+         assert sorted(results.keys()) == sorted(exp_results)
+         for k in results:
+             if not k.endswith("_time"):
+                 # XXX: results['params'] is a list :|
+                 results[k] = np.asanyarray(results[k])
+                 if results[k].dtype.kind == "O":
+                     assert_array_equal(
+                         exp_results[k], results[k], err_msg="Checking " + k
+                     )
+                 else:
+                     assert_allclose(exp_results[k], results[k], err_msg="Checking " + k)
+
+     def fit_grid(param_grid):
+         return GridSearchCV(clf, param_grid, return_train_score=True).fit(X, y)
+
+     class CustomSearchCV(BaseSearchCV):
+         def __init__(self, estimator, **kwargs):
+             super().__init__(estimator, **kwargs)
+
+         def _run_search(self, evaluate):
+             results = evaluate([{"max_depth": 1}, {"max_depth": 2}])
+             check_results(results, fit_grid({"max_depth": [1, 2]}))
+             results = evaluate([{"min_samples_split": 5}, {"min_samples_split": 10}])
+             check_results(
+                 results,
+                 fit_grid([{"max_depth": [1, 2]}, {"min_samples_split": [5, 10]}]),
+             )
+
+     # Using a regressor to make sure each score differs
+     clf = DecisionTreeRegressor(random_state=0)
+     X, y = make_classification(n_samples=100, n_informative=4, random_state=0)
+     mycv = CustomSearchCV(clf, return_train_score=True).fit(X, y)
+     gscv = fit_grid([{"max_depth": [1, 2]}, {"min_samples_split": [5, 10]}])
+
+     results = mycv.cv_results_
+     check_results(results, gscv)
+     for attr in dir(gscv):
+         if (
+             attr[0].islower()
+             and attr[-1:] == "_"
+             and attr
+             not in {
+                 "cv_results_",
+                 "best_estimator_",
+                 "refit_time_",
+                 "classes_",
+                 "scorer_",
+             }
+         ):
+             assert getattr(gscv, attr) == getattr(mycv, attr), (
+                 "Attribute %s not equal" % attr
+             )
+
+
+ def test__custom_fit_no_run_search():
+     class NoRunSearchSearchCV(BaseSearchCV):
+         def __init__(self, estimator, **kwargs):
+             super().__init__(estimator, **kwargs)
+
+         def fit(self, X, y=None, groups=None, **fit_params):
+             return self
+
+     # this should not raise any exceptions
+     NoRunSearchSearchCV(SVC()).fit(X, y)
+
+     class BadSearchCV(BaseSearchCV):
+         def __init__(self, estimator, **kwargs):
+             super().__init__(estimator, **kwargs)
+
+     with pytest.raises(NotImplementedError, match="_run_search not implemented."):
+         # this should raise a NotImplementedError
+         BadSearchCV(SVC()).fit(X, y)
+
+
+ def test_empty_cv_iterator_error():
+     # Use global X, y
+
+     # create cv
+     cv = KFold(n_splits=3).split(X)
+
+     # consume all of it; the later fit should then raise the expected ValueError
+     [u for u in cv]
+     # cv is empty now
+
+     train_size = 100
+     ridge = RandomizedSearchCV(Ridge(), {"alpha": [1e-3, 1e-2, 1e-1]}, cv=cv, n_jobs=4)
+
+     # assert that this raises an error
+     with pytest.raises(
+         ValueError,
+         match=(
+             "No fits were performed. "
+             "Was the CV iterator empty\\? "
+             "Were there no candidates\\?"
+         ),
+     ):
+         ridge.fit(X[:train_size], y[:train_size])
+
+
+ def test_random_search_bad_cv():
+     # Use global X, y
+
+     class BrokenKFold(KFold):
+         def get_n_splits(self, *args, **kw):
+             return 1
+
+     # create bad cv
+     cv = BrokenKFold(n_splits=3)
+
+     train_size = 100
+     ridge = RandomizedSearchCV(Ridge(), {"alpha": [1e-3, 1e-2, 1e-1]}, cv=cv, n_jobs=4)
+
+     # assert that this raises an error
+     with pytest.raises(
+         ValueError,
+         match=(
+             "cv.split and cv.get_n_splits returned "
+             "inconsistent results. Expected \\d+ "
+             "splits, got \\d+"
+         ),
+     ):
+         ridge.fit(X[:train_size], y[:train_size])
+
+
+ @pytest.mark.parametrize("return_train_score", [False, True])
+ @pytest.mark.parametrize(
+     "SearchCV, specialized_params",
+     [
+         (GridSearchCV, {"param_grid": {"max_depth": [2, 3, 5, 8]}}),
+         (
+             RandomizedSearchCV,
+             {"param_distributions": {"max_depth": [2, 3, 5, 8]}, "n_iter": 4},
+         ),
+     ],
+ )
+ def test_searchcv_raise_warning_with_non_finite_score(
+     SearchCV, specialized_params, return_train_score
+ ):
+     # Non-regression test for:
+     # https://github.com/scikit-learn/scikit-learn/issues/10529
+     # Check that we raise a UserWarning when a non-finite score is
+     # computed in the SearchCV
+     X, y = make_classification(n_classes=2, random_state=0)
+
+     class FailingScorer:
+         """Scorer that will fail for some splits but not all."""
+
+         def __init__(self):
+             self.n_counts = 0
+
+         def __call__(self, estimator, X, y):
+             self.n_counts += 1
+             if self.n_counts % 5 == 0:
+                 return np.nan
+             return 1
+
+     grid = SearchCV(
+         DecisionTreeClassifier(),
+         scoring=FailingScorer(),
+         cv=3,
+         return_train_score=return_train_score,
+         **specialized_params,
+     )
+
+     with pytest.warns(UserWarning) as warn_msg:
+         grid.fit(X, y)
+
+     set_with_warning = ["test", "train"] if return_train_score else ["test"]
+     assert len(warn_msg) == len(set_with_warning)
+     for msg, dataset in zip(warn_msg, set_with_warning):
+         assert f"One or more of the {dataset} scores are non-finite" in str(msg.message)
+
+     # all non-finite scores should be equally ranked last
+     last_rank = grid.cv_results_["rank_test_score"].max()
+     non_finite_mask = np.isnan(grid.cv_results_["mean_test_score"])
+     assert_array_equal(grid.cv_results_["rank_test_score"][non_finite_mask], last_rank)
+     # all finite scores should be better ranked than the non-finite scores
+     assert np.all(grid.cv_results_["rank_test_score"][~non_finite_mask] < last_rank)
+
+
+ def test_callable_multimetric_confusion_matrix():
+     # Test that a callable returning multiple metrics inserts the correct
+     # names and metrics into the search cv object
+     def custom_scorer(clf, X, y):
+         y_pred = clf.predict(X)
+         cm = confusion_matrix(y, y_pred)
+         return {"tn": cm[0, 0], "fp": cm[0, 1], "fn": cm[1, 0], "tp": cm[1, 1]}
+
+     X, y = make_classification(n_samples=40, n_features=4, random_state=42)
+     est = LinearSVC(dual="auto", random_state=42)
+     search = GridSearchCV(est, {"C": [0.1, 1]}, scoring=custom_scorer, refit="fp")
+
+     search.fit(X, y)
+
+     score_names = ["tn", "fp", "fn", "tp"]
+     for name in score_names:
+         assert "mean_test_{}".format(name) in search.cv_results_
+
+     y_pred = search.predict(X)
+     cm = confusion_matrix(y, y_pred)
+     assert search.score(X, y) == pytest.approx(cm[0, 1])
+
+
+ def test_callable_multimetric_same_as_list_of_strings():
+     # Test that a callable multimetric scorer behaves the same as a list of strings
+     def custom_scorer(est, X, y):
+         y_pred = est.predict(X)
+         return {
+             "recall": recall_score(y, y_pred),
+             "accuracy": accuracy_score(y, y_pred),
+         }
+
+     X, y = make_classification(n_samples=40, n_features=4, random_state=42)
+     est = LinearSVC(dual="auto", random_state=42)
+     search_callable = GridSearchCV(
+         est, {"C": [0.1, 1]}, scoring=custom_scorer, refit="recall"
+     )
+     search_str = GridSearchCV(
+         est, {"C": [0.1, 1]}, scoring=["recall", "accuracy"], refit="recall"
+     )
+
+     search_callable.fit(X, y)
+     search_str.fit(X, y)
+
+     assert search_callable.best_score_ == pytest.approx(search_str.best_score_)
+     assert search_callable.best_index_ == search_str.best_index_
+     assert search_callable.score(X, y) == pytest.approx(search_str.score(X, y))
+
+
+ def test_callable_single_metric_same_as_single_string():
+     # Test that a callable scorer behaves the same as scoring with a single string
+     def custom_scorer(est, X, y):
+         y_pred = est.predict(X)
+         return recall_score(y, y_pred)
+
+     X, y = make_classification(n_samples=40, n_features=4, random_state=42)
+     est = LinearSVC(dual="auto", random_state=42)
+     search_callable = GridSearchCV(
+         est, {"C": [0.1, 1]}, scoring=custom_scorer, refit=True
+     )
+     search_str = GridSearchCV(est, {"C": [0.1, 1]}, scoring="recall", refit="recall")
+     search_list_str = GridSearchCV(
+         est, {"C": [0.1, 1]}, scoring=["recall"], refit="recall"
+     )
+     search_callable.fit(X, y)
+     search_str.fit(X, y)
+     search_list_str.fit(X, y)
+
+     assert search_callable.best_score_ == pytest.approx(search_str.best_score_)
+     assert search_callable.best_index_ == search_str.best_index_
+     assert search_callable.score(X, y) == pytest.approx(search_str.score(X, y))
+
+     assert search_list_str.best_score_ == pytest.approx(search_str.best_score_)
+     assert search_list_str.best_index_ == search_str.best_index_
+     assert search_list_str.score(X, y) == pytest.approx(search_str.score(X, y))
+
+
+ def test_callable_multimetric_error_on_invalid_key():
+     # Raises when the callable scorer does not return a dict with the `refit` key.
+     def bad_scorer(est, X, y):
+         return {"bad_name": 1}
+
+     X, y = make_classification(n_samples=40, n_features=4, random_state=42)
+     clf = GridSearchCV(
+         LinearSVC(dual="auto", random_state=42),
+         {"C": [0.1, 1]},
+         scoring=bad_scorer,
+         refit="good_name",
+     )
+
+     msg = (
+         "For multi-metric scoring, the parameter refit must be set to a "
+         "scorer key or a callable to refit"
+     )
+     with pytest.raises(ValueError, match=msg):
+         clf.fit(X, y)
+
+
+ def test_callable_multimetric_error_failing_clf():
+     # Warns when an estimator fails to fit with a float error_score
+     def custom_scorer(est, X, y):
+         return {"acc": 1}
+
+     X, y = make_classification(n_samples=20, n_features=10, random_state=0)
+
+     clf = FailingClassifier()
+     gs = GridSearchCV(
+         clf,
+         [{"parameter": [0, 1, 2]}],
+         scoring=custom_scorer,
+         refit=False,
+         error_score=0.1,
+     )
+
+     warning_message = re.compile(
+         "5 fits failed.+total of 15.+The score on these"
+         r" train-test partitions for these parameters will be set to 0\.1",
+         flags=re.DOTALL,
+     )
+     with pytest.warns(FitFailedWarning, match=warning_message):
+         gs.fit(X, y)
+
+     assert_allclose(gs.cv_results_["mean_test_acc"], [1, 1, 0.1])
+
+
+ def test_callable_multimetric_clf_all_fits_fail():
+     # Warns and raises when all estimators fail to fit.
+     def custom_scorer(est, X, y):
+         return {"acc": 1}
+
+     X, y = make_classification(n_samples=20, n_features=10, random_state=0)
+
+     clf = FailingClassifier()
+
+     gs = GridSearchCV(
+         clf,
+         [{"parameter": [FailingClassifier.FAILING_PARAMETER] * 3}],
+         scoring=custom_scorer,
+         refit=False,
+         error_score=0.1,
+     )
+
+     individual_fit_error_message = "ValueError: Failing classifier failed as required"
+     error_message = re.compile(
+         (
+             "All the 15 fits failed.+your model is misconfigured.+"
+             f"{individual_fit_error_message}"
+         ),
+         flags=re.DOTALL,
+     )
+
+     with pytest.raises(ValueError, match=error_message):
+         gs.fit(X, y)
+
+
+ def test_n_features_in():
+     # make sure grid search and random search delegate n_features_in_ to the
+     # best estimator
+     n_features = 4
+     X, y = make_classification(n_features=n_features)
+     gbdt = HistGradientBoostingClassifier()
+     param_grid = {"max_iter": [3, 4]}
+     gs = GridSearchCV(gbdt, param_grid)
+     rs = RandomizedSearchCV(gbdt, param_grid, n_iter=1)
+     assert not hasattr(gs, "n_features_in_")
+     assert not hasattr(rs, "n_features_in_")
+     gs.fit(X, y)
+     rs.fit(X, y)
+     assert gs.n_features_in_ == n_features
+     assert rs.n_features_in_ == n_features
+
+
+ @pytest.mark.parametrize("pairwise", [True, False])
+ def test_search_cv_pairwise_property_delegated_to_base_estimator(pairwise):
+     """
+     Test that the BaseSearchCV implementation has a pairwise tag
+     which matches the pairwise tag of its estimator.
+     This test makes sure the pairwise tag is delegated to the base estimator.
+
+     Non-regression test for issue #13920.
+     """
+
+     class TestEstimator(BaseEstimator):
+         def _more_tags(self):
+             return {"pairwise": pairwise}
+
+     est = TestEstimator()
+     attr_message = "BaseSearchCV pairwise tag must match estimator"
+     cv = GridSearchCV(est, {"n_neighbors": [10]})
+     assert pairwise == cv._get_tags()["pairwise"], attr_message
+
+
+ def test_search_cv__pairwise_property_delegated_to_base_estimator():
+     """
+     Test that the BaseSearchCV implementation has a pairwise property
+     which matches the pairwise tag of its estimator.
+     This test makes sure the pairwise tag is delegated to the base estimator.
+
+     Non-regression test for issue #13920.
+     """
+
+     class EstimatorPairwise(BaseEstimator):
+         def __init__(self, pairwise=True):
+             self.pairwise = pairwise
+
+         def _more_tags(self):
+             return {"pairwise": self.pairwise}
+
+     est = EstimatorPairwise()
+     attr_message = "BaseSearchCV _pairwise property must match estimator"
+
+     for _pairwise_setting in [True, False]:
+         est.set_params(pairwise=_pairwise_setting)
+         cv = GridSearchCV(est, {"n_neighbors": [10]})
+         assert _pairwise_setting == cv._get_tags()["pairwise"], attr_message
+
+
+ def test_search_cv_pairwise_property_equivalence_of_precomputed():
+     """
+     Test that the BaseSearchCV implementation has a pairwise tag
+     which matches the pairwise tag of its estimator.
+     This test ensures the equivalence of 'precomputed'.
+
+     Non-regression test for issue #13920.
+     """
+     n_samples = 50
+     n_splits = 2
+     X, y = make_classification(n_samples=n_samples, random_state=0)
+     grid_params = {"n_neighbors": [10]}
+
+     # defaults to euclidean metric (minkowski p = 2)
+     clf = KNeighborsClassifier()
+     cv = GridSearchCV(clf, grid_params, cv=n_splits)
+     cv.fit(X, y)
+     preds_original = cv.predict(X)
+
+     # precompute the euclidean metric to validate that pairwise is working
+     X_precomputed = euclidean_distances(X)
+     clf = KNeighborsClassifier(metric="precomputed")
+     cv = GridSearchCV(clf, grid_params, cv=n_splits)
+     cv.fit(X_precomputed, y)
+     preds_precomputed = cv.predict(X_precomputed)
+
+     attr_message = "GridSearchCV not identical with precomputed metric"
+     assert (preds_original == preds_precomputed).all(), attr_message
+
+
+ @pytest.mark.parametrize(
+     "SearchCV, param_search",
+     [(GridSearchCV, {"a": [0.1, 0.01]}), (RandomizedSearchCV, {"a": uniform(1, 3)})],
+ )
+ def test_scalar_fit_param(SearchCV, param_search):
+     # unofficially sanctioned tolerance for scalar values in fit_params
+     # non-regression test for:
+     # https://github.com/scikit-learn/scikit-learn/issues/15805
+     class TestEstimator(ClassifierMixin, BaseEstimator):
+         def __init__(self, a=None):
+             self.a = a
+
+         def fit(self, X, y, r=None):
+             self.r_ = r
+
+         def predict(self, X):
+             return np.zeros(shape=(len(X)))
+
+     model = SearchCV(TestEstimator(), param_search)
+     X, y = make_classification(random_state=42)
+     model.fit(X, y, r=42)
+     assert model.best_estimator_.r_ == 42
+
+
+ @pytest.mark.parametrize(
+     "SearchCV, param_search",
+     [
+         (GridSearchCV, {"alpha": [0.1, 0.01]}),
+         (RandomizedSearchCV, {"alpha": uniform(0.01, 0.1)}),
+     ],
+ )
+ def test_scalar_fit_param_compat(SearchCV, param_search):
+     # check support for scalar values in fit_params, for instance in LightGBM,
+     # which does not exactly respect the scikit-learn API contract but which we
+     # do not want to break without an explicit deprecation cycle and API
+     # recommendations for implementing early stopping with a user-provided
+     # validation set. non-regression test for:
+     # https://github.com/scikit-learn/scikit-learn/issues/15805
+     X_train, X_valid, y_train, y_valid = train_test_split(
+         *make_classification(random_state=42), random_state=42
+     )
+
+     class _FitParamClassifier(SGDClassifier):
+         def fit(
+             self,
+             X,
+             y,
+             sample_weight=None,
+             tuple_of_arrays=None,
+             scalar_param=None,
+             callable_param=None,
+         ):
+             super().fit(X, y, sample_weight=sample_weight)
+             assert scalar_param > 0
+             assert callable(callable_param)
+
+             # The tuple of arrays should be preserved as a tuple.
+             assert isinstance(tuple_of_arrays, tuple)
+             assert tuple_of_arrays[0].ndim == 2
+             assert tuple_of_arrays[1].ndim == 1
+             return self
+
+     def _fit_param_callable():
+         pass
+
+     model = SearchCV(_FitParamClassifier(), param_search)
+
+     # NOTE: `fit_params` should be data dependent (e.g. `sample_weight`), which
+     # is not the case for the following parameters. But this abuse is common in
+     # popular third-party libraries and we should tolerate this behavior for
+     # now and be careful not to break support for those without following a
+     # proper deprecation cycle.
+     fit_params = {
+         "tuple_of_arrays": (X_valid, y_valid),
+         "callable_param": _fit_param_callable,
+         "scalar_param": 42,
+     }
+     model.fit(X_train, y_train, **fit_params)
+
+
+ # FIXME: Replace this test with a full `check_estimator` once we have API only
+ # checks.
+ @pytest.mark.filterwarnings("ignore:The total space of parameters 4 is")
+ @pytest.mark.parametrize("SearchCV", [GridSearchCV, RandomizedSearchCV])
+ @pytest.mark.parametrize("Predictor", [MinimalRegressor, MinimalClassifier])
+ def test_search_cv_using_minimal_compatible_estimator(SearchCV, Predictor):
+     # Check that a third-party estimator can be used in a search without
+     # inheriting from BaseEstimator.
+     rng = np.random.RandomState(0)
+     X, y = rng.randn(25, 2), np.array([0] * 5 + [1] * 20)
+
+     model = Pipeline(
+         [("transformer", MinimalTransformer()), ("predictor", Predictor())]
+     )
+
+     params = {
+         "transformer__param": [1, 10],
+         "predictor__parama": [1, 10],
+     }
+     search = SearchCV(model, params, error_score="raise")
+     search.fit(X, y)
+
+     assert search.best_params_.keys() == params.keys()
+
+     y_pred = search.predict(X)
+     if is_classifier(search):
+         assert_array_equal(y_pred, 1)
+         assert search.score(X, y) == pytest.approx(accuracy_score(y, y_pred))
+     else:
+         assert_allclose(y_pred, y.mean())
+         assert search.score(X, y) == pytest.approx(r2_score(y, y_pred))
+
+
+ @pytest.mark.parametrize("return_train_score", [True, False])
+ def test_search_cv_verbose_3(capsys, return_train_score):
+     """Check that a search cv with verbose>2 shows the score for single
+     metrics. Non-regression test for #19658."""
+     X, y = make_classification(n_samples=100, n_classes=2, flip_y=0.2, random_state=0)
+     clf = LinearSVC(dual="auto", random_state=0)
+     grid = {"C": [0.1]}
+
+     GridSearchCV(
+         clf,
+         grid,
+         scoring="accuracy",
+         verbose=3,
+         cv=3,
+         return_train_score=return_train_score,
+     ).fit(X, y)
+     captured = capsys.readouterr().out
+     if return_train_score:
+         match = re.findall(r"score=\(train=[\d\.]+, test=[\d.]+\)", captured)
+     else:
+         match = re.findall(r"score=[\d\.]+", captured)
+     assert len(match) == 3
+
+
+ @pytest.mark.parametrize(
+     "SearchCV, param_search",
+     [
+         (GridSearchCV, "param_grid"),
+         (RandomizedSearchCV, "param_distributions"),
+         (HalvingGridSearchCV, "param_grid"),
+     ],
+ )
+ def test_search_estimator_param(SearchCV, param_search):
+     # test that the SearchCV object doesn't change the object given in the
+     # parameter grid
+     X, y = make_classification(random_state=42)
+
+     params = {"clf": [LinearSVC(dual="auto")], "clf__C": [0.01]}
+     orig_C = params["clf"][0].C
+
+     pipe = Pipeline([("trs", MinimalTransformer()), ("clf", None)])
+
+     param_grid_search = {param_search: params}
+     gs = SearchCV(pipe, refit=True, cv=2, scoring="accuracy", **param_grid_search).fit(
+         X, y
+     )
+
+     # testing that the original object in params is not changed
+     assert params["clf"][0].C == orig_C
+     # testing that the GS is setting the parameter of the step correctly
+     assert gs.best_estimator_.named_steps["clf"].C == 0.01
+
+
+ # Metadata Routing Tests
+ # ======================
+
+
+ @pytest.mark.usefixtures("enable_slep006")
+ @pytest.mark.parametrize(
+     "SearchCV, param_search",
+     [
+         (GridSearchCV, "param_grid"),
+         (RandomizedSearchCV, "param_distributions"),
+     ],
+ )
+ def test_multi_metric_search_forwards_metadata(SearchCV, param_search):
+     """Test that *SearchCV forwards metadata correctly when passed multiple metrics."""
+     X, y = make_classification(random_state=42)
+     n_samples = _num_samples(X)
+     rng = np.random.RandomState(0)
+     score_weights = rng.rand(n_samples)
+     score_metadata = rng.rand(n_samples)
+
+     est = LinearSVC(dual="auto")
+     param_grid_search = {param_search: {"C": [1]}}
+
+     scorer_registry = _Registry()
+     scorer = ConsumingScorer(registry=scorer_registry).set_score_request(
+         sample_weight="score_weights", metadata="score_metadata"
+     )
+     scoring = dict(my_scorer=scorer, accuracy="accuracy")
+     SearchCV(est, refit="accuracy", cv=2, scoring=scoring, **param_grid_search).fit(
+         X, y, score_weights=score_weights, score_metadata=score_metadata
+     )
+     assert len(scorer_registry)
+     for _scorer in scorer_registry:
+         check_recorded_metadata(
+             obj=_scorer,
+             method="score",
+             split_params=("sample_weight", "metadata"),
+             sample_weight=score_weights,
+             metadata=score_metadata,
+         )
+
+
+ @pytest.mark.parametrize(
+     "SearchCV, param_search",
+     [
+         (GridSearchCV, "param_grid"),
+         (RandomizedSearchCV, "param_distributions"),
+         (HalvingGridSearchCV, "param_grid"),
+     ],
+ )
+ def test_score_rejects_params_with_no_routing_enabled(SearchCV, param_search):
+     """*SearchCV should reject **params when metadata routing is not enabled,
+     since score params are only accepted when routing is enabled."""
+     X, y = make_classification(random_state=42)
+     est = LinearSVC(dual="auto")
+     param_grid_search = {param_search: {"C": [1]}}
+
+     gs = SearchCV(est, cv=2, **param_grid_search).fit(X, y)
+
+     with pytest.raises(ValueError, match="is only supported if"):
+         gs.score(X, y, metadata=1)
+
+
+ # End of Metadata Routing Tests
+ # =============================
env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/tests/test_split.py ADDED
@@ -0,0 +1,2025 @@
+ """Test the split module"""
+ import re
+ import warnings
+ from itertools import combinations, combinations_with_replacement, permutations
+
+ import numpy as np
+ import pytest
+ from scipy import stats
+ from scipy.sparse import issparse
+ from scipy.special import comb
+
+ from sklearn import config_context
+ from sklearn.datasets import load_digits, make_classification
+ from sklearn.dummy import DummyClassifier
+ from sklearn.model_selection import (
+     GridSearchCV,
+     GroupKFold,
+     GroupShuffleSplit,
+     KFold,
+     LeaveOneGroupOut,
+     LeaveOneOut,
+     LeavePGroupsOut,
+     LeavePOut,
+     PredefinedSplit,
+     RepeatedKFold,
+     RepeatedStratifiedKFold,
+     ShuffleSplit,
+     StratifiedGroupKFold,
+     StratifiedKFold,
+     StratifiedShuffleSplit,
+     TimeSeriesSplit,
+     check_cv,
+     cross_val_score,
+     train_test_split,
+ )
+ from sklearn.model_selection._split import (
+     _build_repr,
+     _validate_shuffle_split,
+     _yields_constant_splits,
+ )
+ from sklearn.svm import SVC
+ from sklearn.tests.metadata_routing_common import assert_request_is_empty
+ from sklearn.utils._array_api import (
+     _convert_to_numpy,
+     get_namespace,
+     yield_namespace_device_dtype_combinations,
+ )
+ from sklearn.utils._array_api import (
+     device as array_api_device,
+ )
+ from sklearn.utils._mocking import MockDataFrame
+ from sklearn.utils._testing import (
+     assert_allclose,
+     assert_array_almost_equal,
+     assert_array_equal,
+     ignore_warnings,
+ )
+ from sklearn.utils.estimator_checks import (
+     _array_api_for_tests,
+ )
+ from sklearn.utils.fixes import COO_CONTAINERS, CSC_CONTAINERS, CSR_CONTAINERS
+ from sklearn.utils.validation import _num_samples
+
+ NO_GROUP_SPLITTERS = [
+     KFold(),
+     StratifiedKFold(),
+     TimeSeriesSplit(),
+     LeaveOneOut(),
+     LeavePOut(p=2),
+     ShuffleSplit(),
+     StratifiedShuffleSplit(test_size=0.5),
+     PredefinedSplit([1, 1, 2, 2]),
+     RepeatedKFold(),
+     RepeatedStratifiedKFold(),
+ ]
+
+ GROUP_SPLITTERS = [
+     GroupKFold(),
+     LeavePGroupsOut(n_groups=1),
+     StratifiedGroupKFold(),
+     LeaveOneGroupOut(),
+     GroupShuffleSplit(),
+ ]
+
+ ALL_SPLITTERS = NO_GROUP_SPLITTERS + GROUP_SPLITTERS  # type: ignore
+
+ X = np.ones(10)
+ y = np.arange(10) // 2
+ test_groups = (
+     np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
+     np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
+     np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
+     np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
+     [1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3],
+     ["1", "1", "1", "1", "2", "2", "2", "3", "3", "3", "3", "3"],
+ )
+ digits = load_digits()
+
+
+ @ignore_warnings
+ def test_cross_validator_with_default_params():
+     n_samples = 4
+     n_unique_groups = 4
+     n_splits = 2
+     p = 2
+     n_shuffle_splits = 10  # (the default value)
+
+     X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
+     X_1d = np.array([1, 2, 3, 4])
+     y = np.array([1, 1, 2, 2])
+     groups = np.array([1, 2, 3, 4])
+     loo = LeaveOneOut()
+     lpo = LeavePOut(p)
+     kf = KFold(n_splits)
+     skf = StratifiedKFold(n_splits)
+     lolo = LeaveOneGroupOut()
+     lopo = LeavePGroupsOut(p)
+     ss = ShuffleSplit(random_state=0)
+     ps = PredefinedSplit([1, 1, 2, 2])  # n_splits = no. of unique folds = 2
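+     # (Editorial aside, not part of the committed test.) PredefinedSplit
+     # yields one split per unique id in test_fold: here test fold 1 holds
+     # out indices [0, 1] and test fold 2 holds out indices [2, 3]; a value
+     # of -1 would keep a sample in the training set of every split.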
120
+ sgkf = StratifiedGroupKFold(n_splits)
121
+
122
+ loo_repr = "LeaveOneOut()"
123
+ lpo_repr = "LeavePOut(p=2)"
124
+ kf_repr = "KFold(n_splits=2, random_state=None, shuffle=False)"
125
+ skf_repr = "StratifiedKFold(n_splits=2, random_state=None, shuffle=False)"
126
+ lolo_repr = "LeaveOneGroupOut()"
127
+ lopo_repr = "LeavePGroupsOut(n_groups=2)"
128
+ ss_repr = (
129
+ "ShuffleSplit(n_splits=10, random_state=0, test_size=None, train_size=None)"
130
+ )
131
+ ps_repr = "PredefinedSplit(test_fold=array([1, 1, 2, 2]))"
132
+ sgkf_repr = "StratifiedGroupKFold(n_splits=2, random_state=None, shuffle=False)"
133
+
134
+ n_splits_expected = [
135
+ n_samples,
136
+ comb(n_samples, p),
137
+ n_splits,
138
+ n_splits,
139
+ n_unique_groups,
140
+ comb(n_unique_groups, p),
141
+ n_shuffle_splits,
142
+ 2,
143
+ n_splits,
144
+ ]
145
+
146
+ for i, (cv, cv_repr) in enumerate(
147
+ zip(
148
+ [loo, lpo, kf, skf, lolo, lopo, ss, ps, sgkf],
149
+ [
150
+ loo_repr,
151
+ lpo_repr,
152
+ kf_repr,
153
+ skf_repr,
154
+ lolo_repr,
155
+ lopo_repr,
156
+ ss_repr,
157
+ ps_repr,
158
+ sgkf_repr,
159
+ ],
160
+ )
161
+ ):
162
+ # Test if get_n_splits works correctly
163
+ assert n_splits_expected[i] == cv.get_n_splits(X, y, groups)
164
+
165
+ # Test if the cross-validator works as expected even if
166
+ # the data is 1d
167
+ np.testing.assert_equal(
168
+ list(cv.split(X, y, groups)), list(cv.split(X_1d, y, groups))
169
+ )
170
+ # Test that train, test indices returned are integers
171
+ for train, test in cv.split(X, y, groups):
172
+ assert np.asarray(train).dtype.kind == "i"
173
+ assert np.asarray(test).dtype.kind == "i"
174
+
175
+ # Test if the repr works without any errors
176
+ assert cv_repr == repr(cv)
177
+
178
+ # ValueError for get_n_splits methods
179
+ msg = "The 'X' parameter should not be None."
180
+ with pytest.raises(ValueError, match=msg):
181
+ loo.get_n_splits(None, y, groups)
182
+ with pytest.raises(ValueError, match=msg):
183
+ lpo.get_n_splits(None, y, groups)
184
+
185
+
186
+ def test_2d_y():
187
+ # smoke test for 2d y and multi-label
188
+ n_samples = 30
189
+ rng = np.random.RandomState(1)
190
+ X = rng.randint(0, 3, size=(n_samples, 2))
191
+ y = rng.randint(0, 3, size=(n_samples,))
192
+ y_2d = y.reshape(-1, 1)
193
+ y_multilabel = rng.randint(0, 2, size=(n_samples, 3))
194
+ groups = rng.randint(0, 3, size=(n_samples,))
195
+ splitters = [
196
+ LeaveOneOut(),
197
+ LeavePOut(p=2),
198
+ KFold(),
199
+ StratifiedKFold(),
200
+ RepeatedKFold(),
201
+ RepeatedStratifiedKFold(),
202
+ StratifiedGroupKFold(),
203
+ ShuffleSplit(),
204
+ StratifiedShuffleSplit(test_size=0.5),
205
+ GroupShuffleSplit(),
206
+ LeaveOneGroupOut(),
207
+ LeavePGroupsOut(n_groups=2),
208
+ GroupKFold(n_splits=3),
209
+ TimeSeriesSplit(),
210
+ PredefinedSplit(test_fold=groups),
211
+ ]
212
+ for splitter in splitters:
213
+ list(splitter.split(X, y, groups))
214
+ list(splitter.split(X, y_2d, groups))
215
+ try:
216
+ list(splitter.split(X, y_multilabel, groups))
217
+ except ValueError as e:
218
+ allowed_target_types = ("binary", "multiclass")
219
+ msg = "Supported target types are: {}. Got 'multilabel".format(
220
+ allowed_target_types
221
+ )
222
+ assert msg in str(e)
223
+
224
+
225
+ def check_valid_split(train, test, n_samples=None):
226
+ # Use python sets to get more informative assertion failure messages
227
+ train, test = set(train), set(test)
228
+
229
+ # Train and test split should not overlap
230
+ assert train.intersection(test) == set()
231
+
232
+ if n_samples is not None:
233
+ # Check that the union of train and test splits covers all the indices
234
+ assert train.union(test) == set(range(n_samples))
235
+
236
+
237
+ def check_cv_coverage(cv, X, y, groups, expected_n_splits):
238
+ n_samples = _num_samples(X)
239
+ # Check that all the samples appear at least once in a test fold
240
+ assert cv.get_n_splits(X, y, groups) == expected_n_splits
241
+
242
+ collected_test_samples = set()
243
+ iterations = 0
244
+ for train, test in cv.split(X, y, groups):
245
+ check_valid_split(train, test, n_samples=n_samples)
246
+ iterations += 1
247
+ collected_test_samples.update(test)
248
+
249
+ # Check that the accumulated test samples cover the whole dataset
250
+ assert iterations == expected_n_splits
251
+ if n_samples is not None:
252
+ assert collected_test_samples == set(range(n_samples))
253
+
254
+
255
+ def test_kfold_valueerrors():
256
+ X1 = np.array([[1, 2], [3, 4], [5, 6]])
257
+ X2 = np.array([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]])
258
+ # Check that an error is raised if there are not enough samples
+ with pytest.raises(ValueError):
+     next(KFold(4).split(X1))
260
+
261
+ # Check that a warning is raised if the least populated class has too few
262
+ # members.
263
+ y = np.array([3, 3, -1, -1, 3])
264
+
265
+ skf_3 = StratifiedKFold(3)
266
+ with pytest.warns(Warning, match="The least populated class"):
267
+ next(skf_3.split(X2, y))
268
+
269
+ sgkf_3 = StratifiedGroupKFold(3)
270
+ naive_groups = np.arange(len(y))
271
+ with pytest.warns(Warning, match="The least populated class"):
272
+ next(sgkf_3.split(X2, y, naive_groups))
273
+
274
+ # Check that despite the warning the folds are still computed, even
+ # though not every class is necessarily represented on each
+ # side of each split
277
+ with warnings.catch_warnings():
278
+ warnings.simplefilter("ignore")
279
+ check_cv_coverage(skf_3, X2, y, groups=None, expected_n_splits=3)
280
+
281
+ with warnings.catch_warnings():
282
+ warnings.simplefilter("ignore")
283
+ check_cv_coverage(sgkf_3, X2, y, groups=naive_groups, expected_n_splits=3)
284
+
285
+ # Check that an error is raised if the number of members of every
+ # class is less than n_splits.
287
+ y = np.array([3, 3, -1, -1, 2])
288
+
289
+ with pytest.raises(ValueError):
290
+ next(skf_3.split(X2, y))
291
+ with pytest.raises(ValueError):
292
+ next(sgkf_3.split(X2, y))
293
+
294
+ # Error when number of folds is <= 1
295
+ with pytest.raises(ValueError):
296
+ KFold(0)
297
+ with pytest.raises(ValueError):
298
+ KFold(1)
299
+ error_string = "k-fold cross-validation requires at least one train/test split"
300
+ with pytest.raises(ValueError, match=error_string):
301
+ StratifiedKFold(0)
302
+ with pytest.raises(ValueError, match=error_string):
303
+ StratifiedKFold(1)
304
+ with pytest.raises(ValueError, match=error_string):
305
+ StratifiedGroupKFold(0)
306
+ with pytest.raises(ValueError, match=error_string):
307
+ StratifiedGroupKFold(1)
308
+
309
+ # When n_splits is not an integer:
310
+ with pytest.raises(ValueError):
311
+ KFold(1.5)
312
+ with pytest.raises(ValueError):
313
+ KFold(2.0)
314
+ with pytest.raises(ValueError):
315
+ StratifiedKFold(1.5)
316
+ with pytest.raises(ValueError):
317
+ StratifiedKFold(2.0)
318
+ with pytest.raises(ValueError):
319
+ StratifiedGroupKFold(1.5)
320
+ with pytest.raises(ValueError):
321
+ StratifiedGroupKFold(2.0)
322
+
323
+ # When shuffle is not a bool:
324
+ with pytest.raises(TypeError):
325
+ KFold(n_splits=4, shuffle=None)
326
+
327
+
328
+ def test_kfold_indices():
329
+ # Check all indices are returned in the test folds
330
+ X1 = np.ones(18)
331
+ kf = KFold(3)
332
+ check_cv_coverage(kf, X1, y=None, groups=None, expected_n_splits=3)
333
+
334
+ # Check all indices are returned in the test folds even when equal-sized
335
+ # folds are not possible
336
+ X2 = np.ones(17)
337
+ kf = KFold(3)
338
+ check_cv_coverage(kf, X2, y=None, groups=None, expected_n_splits=3)
339
+
340
+ # Check if get_n_splits returns the number of folds
341
+ assert 5 == KFold(5).get_n_splits(X2)
342
+
343
+
344
+ def test_kfold_no_shuffle():
345
+ # Manually check that KFold preserves the data ordering on toy datasets
346
+ X2 = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]]
347
+
348
+ splits = KFold(2).split(X2[:-1])
349
+ train, test = next(splits)
350
+ assert_array_equal(test, [0, 1])
351
+ assert_array_equal(train, [2, 3])
352
+
353
+ train, test = next(splits)
354
+ assert_array_equal(test, [2, 3])
355
+ assert_array_equal(train, [0, 1])
356
+
357
+ splits = KFold(2).split(X2)
358
+ train, test = next(splits)
359
+ assert_array_equal(test, [0, 1, 2])
360
+ assert_array_equal(train, [3, 4])
361
+
362
+ train, test = next(splits)
363
+ assert_array_equal(test, [3, 4])
364
+ assert_array_equal(train, [0, 1, 2])
365
+
366
+
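+ # A minimal sketch (illustrative, not library code) of how the unshuffled
+ # fold sizes above come about: KFold hands out contiguous blocks, and the
+ # first n_samples % n_splits folds each receive one extra sample, which is
+ # why 5 samples with 2 splits give the [0, 1, 2] / [3, 4] pattern.
+ def _demo_kfold_fold_sizes(n_samples=5, n_splits=2):
+     fold_sizes = np.full(n_splits, n_samples // n_splits, dtype=int)
+     fold_sizes[: n_samples % n_splits] += 1
+     return fold_sizes  # array([3, 2]) for the defaults above
+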
367
+ def test_stratified_kfold_no_shuffle():
368
+ # Manually check that StratifiedKFold preserves the data ordering as much
369
+ # as possible on toy datasets in order to avoid hiding sample dependencies
370
+ # when possible
371
+ X, y = np.ones(4), [1, 1, 0, 0]
372
+ splits = StratifiedKFold(2).split(X, y)
373
+ train, test = next(splits)
374
+ assert_array_equal(test, [0, 2])
375
+ assert_array_equal(train, [1, 3])
376
+
377
+ train, test = next(splits)
378
+ assert_array_equal(test, [1, 3])
379
+ assert_array_equal(train, [0, 2])
380
+
381
+ X, y = np.ones(7), [1, 1, 1, 0, 0, 0, 0]
382
+ splits = StratifiedKFold(2).split(X, y)
383
+ train, test = next(splits)
384
+ assert_array_equal(test, [0, 1, 3, 4])
385
+ assert_array_equal(train, [2, 5, 6])
386
+
387
+ train, test = next(splits)
388
+ assert_array_equal(test, [2, 5, 6])
389
+ assert_array_equal(train, [0, 1, 3, 4])
390
+
391
+ # Check if get_n_splits returns the number of folds
392
+ assert 5 == StratifiedKFold(5).get_n_splits(X, y)
393
+
394
+ # Make sure string labels are also supported
395
+ X = np.ones(7)
396
+ y1 = ["1", "1", "1", "0", "0", "0", "0"]
397
+ y2 = [1, 1, 1, 0, 0, 0, 0]
398
+ np.testing.assert_equal(
399
+ list(StratifiedKFold(2).split(X, y1)), list(StratifiedKFold(2).split(X, y2))
400
+ )
401
+
402
+ # Check equivalence to KFold
403
+ y = [0, 1, 0, 1, 0, 1, 0, 1]
404
+ X = np.ones_like(y)
405
+ np.testing.assert_equal(
406
+ list(StratifiedKFold(3).split(X, y)), list(KFold(3).split(X, y))
407
+ )
408
+
409
+
410
+ @pytest.mark.parametrize("shuffle", [False, True])
411
+ @pytest.mark.parametrize("k", [4, 5, 6, 7, 8, 9, 10])
412
+ @pytest.mark.parametrize("kfold", [StratifiedKFold, StratifiedGroupKFold])
413
+ def test_stratified_kfold_ratios(k, shuffle, kfold):
414
+ # Check that stratified kfold preserves class ratios in individual splits
415
+ # Repeat with shuffling turned off and on
416
+ n_samples = 1000
417
+ X = np.ones(n_samples)
418
+ y = np.array(
419
+ [4] * int(0.10 * n_samples)
420
+ + [0] * int(0.89 * n_samples)
421
+ + [1] * int(0.01 * n_samples)
422
+ )
423
+ # ensure perfect stratification with StratifiedGroupKFold
424
+ groups = np.arange(len(y))
425
+ distr = np.bincount(y) / len(y)
426
+
427
+ test_sizes = []
428
+ random_state = None if not shuffle else 0
429
+ skf = kfold(k, random_state=random_state, shuffle=shuffle)
430
+ for train, test in skf.split(X, y, groups=groups):
431
+ assert_allclose(np.bincount(y[train]) / len(train), distr, atol=0.02)
432
+ assert_allclose(np.bincount(y[test]) / len(test), distr, atol=0.02)
433
+ test_sizes.append(len(test))
434
+ assert np.ptp(test_sizes) <= 1
435
+
436
+
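+ # Sketch of the ratio check used above, runnable on its own: each fold's
+ # class distribution is estimated with np.bincount and compared to the
+ # overall distribution (y_demo and the 90/10 split are made-up numbers).
+ def _demo_fold_class_distribution():
+     y_demo = np.array([0] * 90 + [1] * 10)
+     overall = np.bincount(y_demo) / len(y_demo)
+     for _, test_idx in StratifiedKFold(n_splits=5).split(np.zeros(100), y_demo):
+         fold_dist = np.bincount(y_demo[test_idx]) / len(test_idx)
+         assert_allclose(fold_dist, overall, atol=0.02)
+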
437
+ @pytest.mark.parametrize("shuffle", [False, True])
438
+ @pytest.mark.parametrize("k", [4, 6, 7])
439
+ @pytest.mark.parametrize("kfold", [StratifiedKFold, StratifiedGroupKFold])
440
+ def test_stratified_kfold_label_invariance(k, shuffle, kfold):
441
+ # Check that stratified kfold gives the same indices regardless of labels
442
+ n_samples = 100
443
+ y = np.array(
444
+ [2] * int(0.10 * n_samples)
445
+ + [0] * int(0.89 * n_samples)
446
+ + [1] * int(0.01 * n_samples)
447
+ )
448
+ X = np.ones(len(y))
449
+ # ensure perfect stratification with StratifiedGroupKFold
450
+ groups = np.arange(len(y))
451
+
452
+ def get_splits(y):
453
+ random_state = None if not shuffle else 0
454
+ return [
455
+ (list(train), list(test))
456
+ for train, test in kfold(
457
+ k, random_state=random_state, shuffle=shuffle
458
+ ).split(X, y, groups=groups)
459
+ ]
460
+
461
+ splits_base = get_splits(y)
462
+ for perm in permutations([0, 1, 2]):
463
+ y_perm = np.take(perm, y)
464
+ splits_perm = get_splits(y_perm)
465
+ assert splits_perm == splits_base
466
+
467
+
468
+ def test_kfold_balance():
469
+ # Check that KFold returns folds with balanced sizes
470
+ for i in range(11, 17):
471
+ kf = KFold(5).split(X=np.ones(i))
472
+ sizes = [len(test) for _, test in kf]
473
+
474
+ assert (np.max(sizes) - np.min(sizes)) <= 1
475
+ assert np.sum(sizes) == i
476
+
477
+
478
+ @pytest.mark.parametrize("kfold", [StratifiedKFold, StratifiedGroupKFold])
479
+ def test_stratifiedkfold_balance(kfold):
480
+ # Check that the stratified k-fold variants return folds with
+ # balanced sizes (only when stratification is possible)
482
+ # Repeat with shuffling turned off and on
483
+ X = np.ones(17)
484
+ y = [0] * 3 + [1] * 14
485
+ # ensure perfect stratification with StratifiedGroupKFold
486
+ groups = np.arange(len(y))
487
+
488
+ for shuffle in (True, False):
489
+ cv = kfold(3, shuffle=shuffle)
490
+ for i in range(11, 17):
491
+ skf = cv.split(X[:i], y[:i], groups[:i])
492
+ sizes = [len(test) for _, test in skf]
493
+
494
+ assert (np.max(sizes) - np.min(sizes)) <= 1
495
+ assert np.sum(sizes) == i
496
+
497
+
498
+ def test_shuffle_kfold():
499
+ # Check the indices are shuffled properly
500
+ kf = KFold(3)
501
+ kf2 = KFold(3, shuffle=True, random_state=0)
502
+ kf3 = KFold(3, shuffle=True, random_state=1)
503
+
504
+ X = np.ones(300)
505
+
506
+ all_folds = np.zeros(300)
507
+ for (tr1, te1), (tr2, te2), (tr3, te3) in zip(
508
+ kf.split(X), kf2.split(X), kf3.split(X)
509
+ ):
510
+ for tr_a, tr_b in combinations((tr1, tr2, tr3), 2):
511
+ # Assert that there is no complete overlap
512
+ assert len(np.intersect1d(tr_a, tr_b)) != len(tr1)
513
+
514
+ # Set all test indices in successive iterations of kf2 to 1
515
+ all_folds[te2] = 1
516
+
517
+ # Check that all indices are returned in the different test folds
518
+ assert sum(all_folds) == 300
519
+
520
+
521
+ @pytest.mark.parametrize("kfold", [KFold, StratifiedKFold, StratifiedGroupKFold])
522
+ def test_shuffle_kfold_stratifiedkfold_reproducibility(kfold):
523
+ X = np.ones(15) # Divisible by 3
524
+ y = [0] * 7 + [1] * 8
525
+ groups_1 = np.arange(len(y))
526
+ X2 = np.ones(16) # Not divisible by 3
527
+ y2 = [0] * 8 + [1] * 8
528
+ groups_2 = np.arange(len(y2))
529
+
530
+ # Check that when shuffle is True, multiple split calls produce the
531
+ # same split when random_state is int
532
+ kf = kfold(3, shuffle=True, random_state=0)
533
+
534
+ np.testing.assert_equal(
535
+ list(kf.split(X, y, groups_1)), list(kf.split(X, y, groups_1))
536
+ )
537
+
538
+ # Check that when shuffle is True, multiple split calls often
539
+ # (not always) produce different splits when random_state is
540
+ # RandomState instance or None
541
+ kf = kfold(3, shuffle=True, random_state=np.random.RandomState(0))
542
+ for data in zip((X, X2), (y, y2), (groups_1, groups_2)):
543
+ # Test if the two splits are different cv
544
+ for (_, test_a), (_, test_b) in zip(kf.split(*data), kf.split(*data)):
545
+ # cv.split(...) yields tuples, each consisting of an array of
+ # train indices and an array of test indices.
+ # Ensure that the splits for the data are not the same
+ # when the random state is not a fixed integer
549
+ with pytest.raises(AssertionError):
550
+ np.testing.assert_array_equal(test_a, test_b)
551
+
552
+
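+ # The random_state contract exercised above, in a self-contained sketch:
+ # an integer seed makes every call to split() deterministic, while a
+ # RandomState instance is consumed statefully, so repeated calls usually
+ # (but not provably always) differ.
+ def _demo_random_state_contract():
+     X_demo = np.arange(12).reshape(-1, 1)
+     kf_int = KFold(3, shuffle=True, random_state=42)
+     first = [list(test) for _, test in kf_int.split(X_demo)]
+     second = [list(test) for _, test in kf_int.split(X_demo)]
+     assert first == second
+     kf_rs = KFold(3, shuffle=True, random_state=np.random.RandomState(42))
+     # typically different across calls; no assertion since it is not guaranteed
+     return [list(test) for _, test in kf_rs.split(X_demo)]
+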
553
+ def test_shuffle_stratifiedkfold():
554
+ # Check that shuffling is happening when requested, and for proper
555
+ # sample coverage
556
+ X_40 = np.ones(40)
557
+ y = [0] * 20 + [1] * 20
558
+ kf0 = StratifiedKFold(5, shuffle=True, random_state=0)
559
+ kf1 = StratifiedKFold(5, shuffle=True, random_state=1)
560
+ for (_, test0), (_, test1) in zip(kf0.split(X_40, y), kf1.split(X_40, y)):
561
+ assert set(test0) != set(test1)
562
+ check_cv_coverage(kf0, X_40, y, groups=None, expected_n_splits=5)
563
+
564
+ # Ensure that we shuffle each class's samples with different
565
+ # random_state in StratifiedKFold
566
+ # See https://github.com/scikit-learn/scikit-learn/pull/13124
567
+ X = np.arange(10)
568
+ y = [0] * 5 + [1] * 5
569
+ kf1 = StratifiedKFold(5, shuffle=True, random_state=0)
570
+ kf2 = StratifiedKFold(5, shuffle=True, random_state=1)
571
+ test_set1 = sorted([tuple(s[1]) for s in kf1.split(X, y)])
572
+ test_set2 = sorted([tuple(s[1]) for s in kf2.split(X, y)])
573
+ assert test_set1 != test_set2
574
+
575
+
576
+ def test_kfold_can_detect_dependent_samples_on_digits(): # see #2372
577
+ # The digits samples are dependent: they are apparently grouped by authors
578
+ # although we don't have any information on the groups segment locations
579
+ # for this data. We can highlight this fact by computing k-fold cross-
580
+ # validation with and without shuffling: we observe that the shuffling case
581
+ # wrongly makes the IID assumption and is therefore too optimistic: it
582
+ # estimates a much higher accuracy (around 0.93) than the
+ # non-shuffling variant (around 0.81).
584
+
585
+ X, y = digits.data[:600], digits.target[:600]
586
+ model = SVC(C=10, gamma=0.005)
587
+
588
+ n_splits = 3
589
+
590
+ cv = KFold(n_splits=n_splits, shuffle=False)
591
+ mean_score = cross_val_score(model, X, y, cv=cv).mean()
592
+ assert 0.92 > mean_score
593
+ assert mean_score > 0.80
594
+
595
+ # Shuffling the data artificially breaks the dependency and hides the
596
+ # overfitting of the model with regard to the writing style of the authors
597
+ # by yielding a seriously overestimated score:
598
+
599
+ cv = KFold(n_splits, shuffle=True, random_state=0)
600
+ mean_score = cross_val_score(model, X, y, cv=cv).mean()
601
+ assert mean_score > 0.92
602
+
603
+ cv = KFold(n_splits, shuffle=True, random_state=1)
604
+ mean_score = cross_val_score(model, X, y, cv=cv).mean()
605
+ assert mean_score > 0.92
606
+
607
+ # Similarly, StratifiedKFold should try to shuffle the data as little
608
+ # as possible (while respecting the balanced class constraints)
609
+ # and thus be able to detect the dependency by not overestimating
610
+ # the CV score either. As the digits dataset is approximately balanced
611
+ # the estimated mean score is close to the score measured with
612
+ # non-shuffled KFold
613
+
614
+ cv = StratifiedKFold(n_splits)
615
+ mean_score = cross_val_score(model, X, y, cv=cv).mean()
616
+ assert 0.94 > mean_score
617
+ assert mean_score > 0.80
618
+
619
+
620
+ def test_stratified_group_kfold_trivial():
621
+ sgkf = StratifiedGroupKFold(n_splits=3)
622
+ # Trivial example - groups with the same distribution
623
+ y = np.array([1] * 6 + [0] * 12)
624
+ X = np.ones_like(y).reshape(-1, 1)
625
+ groups = np.asarray((1, 2, 3, 4, 5, 6, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6))
626
+ distr = np.bincount(y) / len(y)
627
+ test_sizes = []
628
+ for train, test in sgkf.split(X, y, groups):
629
+ # check group constraint
630
+ assert np.intersect1d(groups[train], groups[test]).size == 0
631
+ # check y distribution
632
+ assert_allclose(np.bincount(y[train]) / len(train), distr, atol=0.02)
633
+ assert_allclose(np.bincount(y[test]) / len(test), distr, atol=0.02)
634
+ test_sizes.append(len(test))
635
+ assert np.ptp(test_sizes) <= 1
636
+
637
+
638
+ def test_stratified_group_kfold_approximate():
639
+ # Not perfect stratification (even though it is possible) because of
640
+ # iteration over groups
641
+ sgkf = StratifiedGroupKFold(n_splits=3)
642
+ y = np.array([1] * 6 + [0] * 12)
643
+ X = np.ones_like(y).reshape(-1, 1)
644
+ groups = np.array([1, 2, 3, 3, 4, 4, 1, 1, 2, 2, 3, 4, 5, 5, 5, 6, 6, 6])
645
+ expected = np.asarray([[0.833, 0.166], [0.666, 0.333], [0.5, 0.5]])
646
+ test_sizes = []
647
+ for (train, test), expect_dist in zip(sgkf.split(X, y, groups), expected):
648
+ # check group constraint
649
+ assert np.intersect1d(groups[train], groups[test]).size == 0
650
+ split_dist = np.bincount(y[test]) / len(test)
651
+ assert_allclose(split_dist, expect_dist, atol=0.001)
652
+ test_sizes.append(len(test))
653
+ assert np.ptp(test_sizes) <= 1
654
+
655
+
656
+ @pytest.mark.parametrize(
657
+ "y, groups, expected",
658
+ [
659
+ (
660
+ np.array([0] * 6 + [1] * 6),
661
+ np.array([1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6]),
662
+ np.asarray([[0.5, 0.5], [0.5, 0.5], [0.5, 0.5]]),
663
+ ),
664
+ (
665
+ np.array([0] * 9 + [1] * 3),
666
+ np.array([1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 5, 6]),
667
+ np.asarray([[0.75, 0.25], [0.75, 0.25], [0.75, 0.25]]),
668
+ ),
669
+ ],
670
+ )
671
+ def test_stratified_group_kfold_homogeneous_groups(y, groups, expected):
672
+ sgkf = StratifiedGroupKFold(n_splits=3)
673
+ X = np.ones_like(y).reshape(-1, 1)
674
+ for (train, test), expect_dist in zip(sgkf.split(X, y, groups), expected):
675
+ # check group constraint
676
+ assert np.intersect1d(groups[train], groups[test]).size == 0
677
+ split_dist = np.bincount(y[test]) / len(test)
678
+ assert_allclose(split_dist, expect_dist, atol=0.001)
679
+
680
+
681
+ @pytest.mark.parametrize("cls_distr", [(0.4, 0.6), (0.3, 0.7), (0.2, 0.8), (0.8, 0.2)])
682
+ @pytest.mark.parametrize("n_groups", [5, 30, 70])
683
+ def test_stratified_group_kfold_against_group_kfold(cls_distr, n_groups):
684
+ # Check that given a sufficient amount of samples StratifiedGroupKFold
685
+ # produces better stratified folds than regular GroupKFold
686
+ n_splits = 5
687
+ sgkf = StratifiedGroupKFold(n_splits=n_splits)
688
+ gkf = GroupKFold(n_splits=n_splits)
689
+ rng = np.random.RandomState(0)
690
+ n_points = 1000
691
+ y = rng.choice(2, size=n_points, p=cls_distr)
692
+ X = np.ones_like(y).reshape(-1, 1)
693
+ g = rng.choice(n_groups, n_points)
694
+ sgkf_folds = sgkf.split(X, y, groups=g)
695
+ gkf_folds = gkf.split(X, y, groups=g)
696
+ sgkf_entr = 0
697
+ gkf_entr = 0
698
+ for (sgkf_train, sgkf_test), (_, gkf_test) in zip(sgkf_folds, gkf_folds):
699
+ # check group constraint
700
+ assert np.intersect1d(g[sgkf_train], g[sgkf_test]).size == 0
701
+ sgkf_distr = np.bincount(y[sgkf_test]) / len(sgkf_test)
702
+ gkf_distr = np.bincount(y[gkf_test]) / len(gkf_test)
703
+ sgkf_entr += stats.entropy(sgkf_distr, qk=cls_distr)
704
+ gkf_entr += stats.entropy(gkf_distr, qk=cls_distr)
705
+ sgkf_entr /= n_splits
706
+ gkf_entr /= n_splits
707
+ assert sgkf_entr <= gkf_entr
708
+
709
+
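+ # Note on the metric above: stats.entropy(pk, qk=...) computes the
+ # Kullback-Leibler divergence KL(pk || qk), so a smaller per-fold value
+ # means a test-fold class distribution closer to cls_distr. A tiny worked
+ # example with made-up distributions:
+ def _demo_kl_divergence_orders_distributions():
+     close = stats.entropy([0.39, 0.61], qk=[0.4, 0.6])
+     far = stats.entropy([0.2, 0.8], qk=[0.4, 0.6])
+     assert 0 <= close < far
+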
710
+ def test_shuffle_split():
711
+ ss1 = ShuffleSplit(test_size=0.2, random_state=0).split(X)
712
+ ss2 = ShuffleSplit(test_size=2, random_state=0).split(X)
713
+ ss3 = ShuffleSplit(test_size=np.int32(2), random_state=0).split(X)
714
+ ss4 = ShuffleSplit(test_size=int(2), random_state=0).split(X)
715
+ for t1, t2, t3, t4 in zip(ss1, ss2, ss3, ss4):
716
+ assert_array_equal(t1[0], t2[0])
717
+ assert_array_equal(t2[0], t3[0])
718
+ assert_array_equal(t3[0], t4[0])
719
+ assert_array_equal(t1[1], t2[1])
720
+ assert_array_equal(t2[1], t3[1])
721
+ assert_array_equal(t3[1], t4[1])
722
+
723
+
724
+ @pytest.mark.parametrize("split_class", [ShuffleSplit, StratifiedShuffleSplit])
725
+ @pytest.mark.parametrize(
726
+ "train_size, exp_train, exp_test", [(None, 9, 1), (8, 8, 2), (0.8, 8, 2)]
727
+ )
728
+ def test_shuffle_split_default_test_size(split_class, train_size, exp_train, exp_test):
729
+ # Check that the default value has the expected behavior, i.e. 0.1 if
+ # both are unspecified, or the complement of train_size when only that
+ # is given.
731
+ X = np.ones(10)
732
+ y = np.ones(10)
733
+
734
+ X_train, X_test = next(split_class(train_size=train_size).split(X, y))
735
+
736
+ assert len(X_train) == exp_train
737
+ assert len(X_test) == exp_test
738
+
739
+
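+ # The complement logic verified above, written out explicitly (an
+ # illustrative sketch, not the library's internal implementation):
+ def _demo_shuffle_split_sizes(n_samples=10, train_size=None, default_test=0.1):
+     if train_size is None:
+         n_test = int(np.ceil(default_test * n_samples))  # e.g. (9, 1)
+     else:
+         n_train = (
+             train_size
+             if isinstance(train_size, int)
+             else int(np.floor(train_size * n_samples))
+         )
+         n_test = n_samples - n_train  # e.g. (8, 2) for train_size=8 or 0.8
+     return n_samples - n_test, n_test
+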
740
+ @pytest.mark.parametrize(
741
+ "train_size, exp_train, exp_test", [(None, 8, 2), (7, 7, 3), (0.7, 7, 3)]
742
+ )
743
+ def test_group_shuffle_split_default_test_size(train_size, exp_train, exp_test):
744
+ # Check that the default value has the expected behavior, i.e. 0.2 if
+ # both are unspecified, or the complement of train_size when only that
+ # is given.
746
+ X = np.ones(10)
747
+ y = np.ones(10)
748
+ groups = range(10)
749
+
750
+ X_train, X_test = next(GroupShuffleSplit(train_size=train_size).split(X, y, groups))
751
+
752
+ assert len(X_train) == exp_train
753
+ assert len(X_test) == exp_test
754
+
755
+
756
+ @ignore_warnings
757
+ def test_stratified_shuffle_split_init():
758
+ X = np.arange(7)
759
+ y = np.asarray([0, 1, 1, 1, 2, 2, 2])
760
+ # Check that error is raised if there is a class with only one sample
761
+ with pytest.raises(ValueError):
762
+ next(StratifiedShuffleSplit(3, test_size=0.2).split(X, y))
763
+
764
+ # Check that error is raised if the test set size is smaller than n_classes
765
+ with pytest.raises(ValueError):
766
+ next(StratifiedShuffleSplit(3, test_size=2).split(X, y))
767
+ # Check that error is raised if the train set size is smaller than
768
+ # n_classes
769
+ with pytest.raises(ValueError):
770
+ next(StratifiedShuffleSplit(3, test_size=3, train_size=2).split(X, y))
771
+
772
+ X = np.arange(9)
773
+ y = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2])
774
+
775
+ # Train size or test size too small
776
+ with pytest.raises(ValueError):
777
+ next(StratifiedShuffleSplit(train_size=2).split(X, y))
778
+ with pytest.raises(ValueError):
779
+ next(StratifiedShuffleSplit(test_size=2).split(X, y))
780
+
781
+
782
+ def test_stratified_shuffle_split_respects_test_size():
783
+ y = np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2])
784
+ test_size = 5
785
+ train_size = 10
786
+ sss = StratifiedShuffleSplit(
787
+ 6, test_size=test_size, train_size=train_size, random_state=0
788
+ ).split(np.ones(len(y)), y)
789
+ for train, test in sss:
790
+ assert len(train) == train_size
791
+ assert len(test) == test_size
792
+
793
+
794
+ def test_stratified_shuffle_split_iter():
795
+ ys = [
796
+ np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
797
+ np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
798
+ np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2] * 2),
799
+ np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
800
+ np.array([-1] * 800 + [1] * 50),
801
+ np.concatenate([[i] * (100 + i) for i in range(11)]),
802
+ [1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3],
803
+ ["1", "1", "1", "1", "2", "2", "2", "3", "3", "3", "3", "3"],
804
+ ]
805
+
806
+ for y in ys:
807
+ sss = StratifiedShuffleSplit(6, test_size=0.33, random_state=0).split(
808
+ np.ones(len(y)), y
809
+ )
810
+ y = np.asanyarray(y) # To make it indexable for y[train]
811
+ # this is how test-size is computed internally
812
+ # in _validate_shuffle_split
813
+ test_size = np.ceil(0.33 * len(y))
814
+ train_size = len(y) - test_size
815
+ for train, test in sss:
816
+ assert_array_equal(np.unique(y[train]), np.unique(y[test]))
817
+ # Checks if folds keep class proportions
818
+ p_train = np.bincount(np.unique(y[train], return_inverse=True)[1]) / float(
819
+ len(y[train])
820
+ )
821
+ p_test = np.bincount(np.unique(y[test], return_inverse=True)[1]) / float(
822
+ len(y[test])
823
+ )
824
+ assert_array_almost_equal(p_train, p_test, 1)
825
+ assert len(train) + len(test) == y.size
826
+ assert len(train) == train_size
827
+ assert len(test) == test_size
828
+ assert_array_equal(np.intersect1d(train, test), [])
829
+
830
+
831
+ def test_stratified_shuffle_split_even():
832
+ # Test that StratifiedShuffleSplit draws indices with
+ # equal chance
834
+ n_folds = 5
835
+ n_splits = 1000
836
+
837
+ def assert_counts_are_ok(idx_counts, p):
838
+ # Here we test that the distribution of the counts
839
+ # per index is close enough to a binomial
840
+ threshold = 0.05 / n_splits
841
+ bf = stats.binom(n_splits, p)
842
+ for count in idx_counts:
843
+ prob = bf.pmf(count)
844
+ assert (
845
+ prob > threshold
846
+ ), "An index is not drawn with chance corresponding to even draws"
847
+
848
+ for n_samples in (6, 22):
849
+ groups = np.array((n_samples // 2) * [0, 1])
850
+ splits = StratifiedShuffleSplit(
851
+ n_splits=n_splits, test_size=1.0 / n_folds, random_state=0
852
+ )
853
+
854
+ train_counts = [0] * n_samples
855
+ test_counts = [0] * n_samples
856
+ n_splits_actual = 0
857
+ for train, test in splits.split(X=np.ones(n_samples), y=groups):
858
+ n_splits_actual += 1
859
+ for counter, ids in [(train_counts, train), (test_counts, test)]:
860
+ for id in ids:
861
+ counter[id] += 1
862
+ assert n_splits_actual == n_splits
863
+
864
+ n_train, n_test = _validate_shuffle_split(
865
+ n_samples, test_size=1.0 / n_folds, train_size=1.0 - (1.0 / n_folds)
866
+ )
867
+
868
+ assert len(train) == n_train
869
+ assert len(test) == n_test
870
+ assert len(set(train).intersection(test)) == 0
871
+
872
+ group_counts = np.unique(groups)
873
+ assert splits.test_size == 1.0 / n_folds
874
+ assert n_train + n_test == len(groups)
875
+ assert len(group_counts) == 2
876
+ ex_test_p = float(n_test) / n_samples
877
+ ex_train_p = float(n_train) / n_samples
878
+
879
+ assert_counts_are_ok(train_counts, ex_train_p)
880
+ assert_counts_are_ok(test_counts, ex_test_p)
881
+
882
+
883
+ def test_stratified_shuffle_split_overlap_train_test_bug():
884
+ # See https://github.com/scikit-learn/scikit-learn/issues/6121 for
885
+ # the original bug report
886
+ y = [0, 1, 2, 3] * 3 + [4, 5] * 5
887
+ X = np.ones_like(y)
888
+
889
+ sss = StratifiedShuffleSplit(n_splits=1, test_size=0.5, random_state=0)
890
+
891
+ train, test = next(sss.split(X=X, y=y))
892
+
893
+ # no overlap
894
+ assert_array_equal(np.intersect1d(train, test), [])
895
+
896
+ # complete partition
897
+ assert_array_equal(np.union1d(train, test), np.arange(len(y)))
898
+
899
+
900
+ def test_stratified_shuffle_split_multilabel():
901
+ # fix for issue 9037
902
+ for y in [
903
+ np.array([[0, 1], [1, 0], [1, 0], [0, 1]]),
904
+ np.array([[0, 1], [1, 1], [1, 1], [0, 1]]),
905
+ ]:
906
+ X = np.ones_like(y)
907
+ sss = StratifiedShuffleSplit(n_splits=1, test_size=0.5, random_state=0)
908
+ train, test = next(sss.split(X=X, y=y))
909
+ y_train = y[train]
910
+ y_test = y[test]
911
+
912
+ # no overlap
913
+ assert_array_equal(np.intersect1d(train, test), [])
914
+
915
+ # complete partition
916
+ assert_array_equal(np.union1d(train, test), np.arange(len(y)))
917
+
918
+ # correct stratification of entire rows
919
+ # (by design, here y[:, 0] uniquely determines the entire row of y)
920
+ expected_ratio = np.mean(y[:, 0])
921
+ assert expected_ratio == np.mean(y_train[:, 0])
922
+ assert expected_ratio == np.mean(y_test[:, 0])
923
+
924
+
925
+ def test_stratified_shuffle_split_multilabel_many_labels():
926
+ # fix in PR #9922: for multilabel data with > 1000 labels, str(row)
927
+ # truncates with an ellipsis for elements in positions 4 through
928
+ # len(row) - 4, so labels were not being correctly split using the powerset
929
+ # method for transforming a multilabel problem to a multiclass one; this
930
+ # test checks that this problem is fixed.
931
+ row_with_many_zeros = [1, 0, 1] + [0] * 1000 + [1, 0, 1]
932
+ row_with_many_ones = [1, 0, 1] + [1] * 1000 + [1, 0, 1]
933
+ y = np.array([row_with_many_zeros] * 10 + [row_with_many_ones] * 100)
934
+ X = np.ones_like(y)
935
+
936
+ sss = StratifiedShuffleSplit(n_splits=1, test_size=0.5, random_state=0)
937
+ train, test = next(sss.split(X=X, y=y))
938
+ y_train = y[train]
939
+ y_test = y[test]
940
+
941
+ # correct stratification of entire rows
942
+ # (by design, here y[:, 4] uniquely determines the entire row of y)
943
+ expected_ratio = np.mean(y[:, 4])
944
+ assert expected_ratio == np.mean(y_train[:, 4])
945
+ assert expected_ratio == np.mean(y_test[:, 4])
946
+
947
+
948
+ def test_predefinedsplit_with_kfold_split():
949
+ # Check that PredefinedSplit can reproduce a split generated by Kfold.
950
+ folds = np.full(10, -1.0)
951
+ kf_train = []
952
+ kf_test = []
953
+ for i, (train_ind, test_ind) in enumerate(KFold(5, shuffle=True).split(X)):
954
+ kf_train.append(train_ind)
955
+ kf_test.append(test_ind)
956
+ folds[test_ind] = i
957
+ ps = PredefinedSplit(folds)
958
+ # n_splits is simply the number of unique folds
959
+ assert len(np.unique(folds)) == ps.get_n_splits()
960
+ ps_train, ps_test = zip(*ps.split())
961
+ assert_array_equal(ps_train, kf_train)
962
+ assert_array_equal(ps_test, kf_test)
963
+
964
+
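+ # How PredefinedSplit interprets test_fold, as relied on above (a small
+ # self-contained sketch with made-up folds): entry i is the index of the
+ # test set that sample i joins, and samples marked -1 never enter a test set.
+ def _demo_predefined_split_semantics():
+     ps_demo = PredefinedSplit(test_fold=[0, 1, -1, 1])
+     assert ps_demo.get_n_splits() == 2  # -1 does not count as a fold
+     train0, test0 = next(ps_demo.split())
+     assert_array_equal(test0, [0])
+     assert_array_equal(train0, [1, 2, 3])
+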
965
+ def test_group_shuffle_split():
966
+ for groups_i in test_groups:
967
+ X = y = np.ones(len(groups_i))
968
+ n_splits = 6
969
+ test_size = 1.0 / 3
970
+ slo = GroupShuffleSplit(n_splits, test_size=test_size, random_state=0)
971
+
972
+ # Make sure the repr works
973
+ repr(slo)
974
+
975
+ # Test that the length is correct
976
+ assert slo.get_n_splits(X, y, groups=groups_i) == n_splits
977
+
978
+ l_unique = np.unique(groups_i)
979
+ l = np.asarray(groups_i)
980
+
981
+ for train, test in slo.split(X, y, groups=groups_i):
982
+ # First test: no train group is in the test set and vice versa
983
+ l_train_unique = np.unique(l[train])
984
+ l_test_unique = np.unique(l[test])
985
+ assert not np.any(np.isin(l[train], l_test_unique))
986
+ assert not np.any(np.isin(l[test], l_train_unique))
987
+
988
+ # Second test: train and test add up to all the data
989
+ assert l[train].size + l[test].size == l.size
990
+
991
+ # Third test: train and test are disjoint
992
+ assert_array_equal(np.intersect1d(train, test), [])
993
+
994
+ # Fourth test:
995
+ # unique train and test groups are correct, +- 1 for rounding error
996
+ assert abs(len(l_test_unique) - round(test_size * len(l_unique))) <= 1
997
+ assert (
998
+ abs(len(l_train_unique) - round((1.0 - test_size) * len(l_unique))) <= 1
999
+ )
1000
+
1001
+
1002
+ def test_leave_one_p_group_out():
1003
+ logo = LeaveOneGroupOut()
1004
+ lpgo_1 = LeavePGroupsOut(n_groups=1)
1005
+ lpgo_2 = LeavePGroupsOut(n_groups=2)
1006
+
1007
+ # Make sure the repr works
1008
+ assert repr(logo) == "LeaveOneGroupOut()"
1009
+ assert repr(lpgo_1) == "LeavePGroupsOut(n_groups=1)"
1010
+ assert repr(lpgo_2) == "LeavePGroupsOut(n_groups=2)"
1011
+ assert repr(LeavePGroupsOut(n_groups=3)) == "LeavePGroupsOut(n_groups=3)"
1012
+
1013
+ for j, (cv, p_groups_out) in enumerate(((logo, 1), (lpgo_1, 1), (lpgo_2, 2))):
1014
+ for i, groups_i in enumerate(test_groups):
1015
+ n_groups = len(np.unique(groups_i))
1016
+ n_splits = n_groups if p_groups_out == 1 else n_groups * (n_groups - 1) / 2
1017
+ X = y = np.ones(len(groups_i))
1018
+
1019
+ # Test that the length is correct
1020
+ assert cv.get_n_splits(X, y, groups=groups_i) == n_splits
1021
+
1022
+ groups_arr = np.asarray(groups_i)
1023
+
1024
+ # Split using the original list / array / list of string groups_i
1025
+ for train, test in cv.split(X, y, groups=groups_i):
1026
+ # First test: no train group is in the test set and vice versa
1027
+ assert_array_equal(
1028
+ np.intersect1d(groups_arr[train], groups_arr[test]).tolist(), []
1029
+ )
1030
+
1031
+ # Second test: train and test add up to all the data
1032
+ assert len(train) + len(test) == len(groups_i)
1033
+
1034
+ # Third test:
1035
+ # The number of groups in test must be equal to p_groups_out
1036
+ assert np.unique(groups_arr[test]).shape[0] == p_groups_out
1037
+
1038
+ # check get_n_splits() with dummy parameters
1039
+ assert logo.get_n_splits(None, None, ["a", "b", "c", "b", "c"]) == 3
1040
+ assert logo.get_n_splits(groups=[1.0, 1.1, 1.0, 1.2]) == 3
1041
+ assert lpgo_2.get_n_splits(None, None, np.arange(4)) == 6
1042
+ assert lpgo_1.get_n_splits(groups=np.arange(4)) == 4
1043
+
1044
+ # raise ValueError if a `groups` parameter is illegal
1045
+ with pytest.raises(ValueError):
1046
+ logo.get_n_splits(None, None, [0.0, np.nan, 0.0])
1047
+ with pytest.raises(ValueError):
1048
+ lpgo_2.get_n_splits(None, None, [0.0, np.inf, 0.0])
1049
+
1050
+ msg = "The 'groups' parameter should not be None."
1051
+ with pytest.raises(ValueError, match=msg):
1052
+ logo.get_n_splits(None, None, None)
1053
+ with pytest.raises(ValueError, match=msg):
1054
+ lpgo_1.get_n_splits(None, None, None)
1055
+
1056
+
1057
+ def test_leave_group_out_changing_groups():
1058
+ # Check that LeaveOneGroupOut and LeavePGroupsOut work normally if
1059
+ # the groups variable is changed before calling split
1060
+ groups = np.array([0, 1, 2, 1, 1, 2, 0, 0])
1061
+ X = np.ones(len(groups))
1062
+ groups_changing = np.array(groups, copy=True)
1063
+ lolo = LeaveOneGroupOut().split(X, groups=groups)
1064
+ lolo_changing = LeaveOneGroupOut().split(X, groups=groups)
1065
+ lplo = LeavePGroupsOut(n_groups=2).split(X, groups=groups)
1066
+ lplo_changing = LeavePGroupsOut(n_groups=2).split(X, groups=groups)
1067
+ groups_changing[:] = 0
1068
+ for llo, llo_changing in [(lolo, lolo_changing), (lplo, lplo_changing)]:
1069
+ for (train, test), (train_chan, test_chan) in zip(llo, llo_changing):
1070
+ assert_array_equal(train, train_chan)
1071
+ assert_array_equal(test, test_chan)
1072
+
1073
+ # n_splits = number of 2-group (p) combinations of the unique groups = C(3, 2) = 3
1074
+ assert 3 == LeavePGroupsOut(n_groups=2).get_n_splits(X, y=X, groups=groups)
1075
+ # n_splits = number of unique groups (C(n_unique_groups, 1) = n_unique_groups)
1076
+ assert 3 == LeaveOneGroupOut().get_n_splits(X, y=X, groups=groups)
1077
+
1078
+
1079
+ def test_leave_group_out_order_dependence():
1080
+ # Check that LeaveOneGroupOut orders the splits according to the index
1081
+ # of the group left out.
1082
+ groups = np.array([2, 2, 0, 0, 1, 1])
1083
+ X = np.ones(len(groups))
1084
+
1085
+ splits = iter(LeaveOneGroupOut().split(X, groups=groups))
1086
+
1087
+ expected_indices = [
1088
+ ([0, 1, 4, 5], [2, 3]),
1089
+ ([0, 1, 2, 3], [4, 5]),
1090
+ ([2, 3, 4, 5], [0, 1]),
1091
+ ]
1092
+
1093
+ for expected_train, expected_test in expected_indices:
1094
+ train, test = next(splits)
1095
+ assert_array_equal(train, expected_train)
1096
+ assert_array_equal(test, expected_test)
1097
+
1098
+
1099
+ def test_leave_one_p_group_out_error_on_fewer_number_of_groups():
1100
+ X = y = groups = np.ones(0)
1101
+ msg = re.escape("Found array with 0 sample(s)")
1102
+ with pytest.raises(ValueError, match=msg):
1103
+ next(LeaveOneGroupOut().split(X, y, groups))
1104
+
1105
+ X = y = groups = np.ones(1)
1106
+ msg = re.escape(
1107
+ f"The groups parameter contains fewer than 2 unique groups ({groups})."
1108
+ " LeaveOneGroupOut expects at least 2."
1109
+ )
1110
+ with pytest.raises(ValueError, match=msg):
1111
+ next(LeaveOneGroupOut().split(X, y, groups))
1112
+
1113
+ X = y = groups = np.ones(1)
1114
+ msg = re.escape(
1115
+ "The groups parameter contains fewer than (or equal to) n_groups "
1116
+ f"(3) numbers of unique groups ({groups}). LeavePGroupsOut expects "
1117
+ "that at least n_groups + 1 (4) unique groups "
1118
+ "be present"
1119
+ )
1120
+ with pytest.raises(ValueError, match=msg):
1121
+ next(LeavePGroupsOut(n_groups=3).split(X, y, groups))
1122
+
1123
+ X = y = groups = np.arange(3)
1124
+ msg = re.escape(
1125
+ "The groups parameter contains fewer than (or equal to) n_groups "
1126
+ f"(3) numbers of unique groups ({groups}). LeavePGroupsOut expects "
1127
+ "that at least n_groups + 1 (4) unique groups "
1128
+ "be present"
1129
+ )
1130
+ with pytest.raises(ValueError, match=msg):
1131
+ next(LeavePGroupsOut(n_groups=3).split(X, y, groups))
1132
+
1133
+
1134
+ @ignore_warnings
1135
+ def test_repeated_cv_value_errors():
1136
+ # n_repeats is not an integer or is <= 0
1137
+ for cv in (RepeatedKFold, RepeatedStratifiedKFold):
1138
+ with pytest.raises(ValueError):
1139
+ cv(n_repeats=0)
1140
+ with pytest.raises(ValueError):
1141
+ cv(n_repeats=1.5)
1142
+
1143
+
1144
+ @pytest.mark.parametrize("RepeatedCV", [RepeatedKFold, RepeatedStratifiedKFold])
1145
+ def test_repeated_cv_repr(RepeatedCV):
1146
+ n_splits, n_repeats = 2, 6
1147
+ repeated_cv = RepeatedCV(n_splits=n_splits, n_repeats=n_repeats)
1148
+ repeated_cv_repr = "{}(n_repeats=6, n_splits=2, random_state=None)".format(
1149
+ repeated_cv.__class__.__name__
1150
+ )
1151
+ assert repeated_cv_repr == repr(repeated_cv)
1152
+
1153
+
1154
+ def test_repeated_kfold_deterministic_split():
1155
+ X = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]]
1156
+ random_state = 258173307
1157
+ rkf = RepeatedKFold(n_splits=2, n_repeats=2, random_state=random_state)
1158
+
1159
+ # split should produce same and deterministic splits on
1160
+ # each call
1161
+ for _ in range(3):
1162
+ splits = rkf.split(X)
1163
+ train, test = next(splits)
1164
+ assert_array_equal(train, [2, 4])
1165
+ assert_array_equal(test, [0, 1, 3])
1166
+
1167
+ train, test = next(splits)
1168
+ assert_array_equal(train, [0, 1, 3])
1169
+ assert_array_equal(test, [2, 4])
1170
+
1171
+ train, test = next(splits)
1172
+ assert_array_equal(train, [0, 1])
1173
+ assert_array_equal(test, [2, 3, 4])
1174
+
1175
+ train, test = next(splits)
1176
+ assert_array_equal(train, [2, 3, 4])
1177
+ assert_array_equal(test, [0, 1])
1178
+
1179
+ with pytest.raises(StopIteration):
1180
+ next(splits)
1181
+
1182
+
1183
+ def test_get_n_splits_for_repeated_kfold():
1184
+ n_splits = 3
1185
+ n_repeats = 4
1186
+ rkf = RepeatedKFold(n_splits=n_splits, n_repeats=n_repeats)
1187
+ expected_n_splits = n_splits * n_repeats
1188
+ assert expected_n_splits == rkf.get_n_splits()
1189
+
1190
+
1191
+ def test_get_n_splits_for_repeated_stratified_kfold():
1192
+ n_splits = 3
1193
+ n_repeats = 4
1194
+ rskf = RepeatedStratifiedKFold(n_splits=n_splits, n_repeats=n_repeats)
1195
+ expected_n_splits = n_splits * n_repeats
1196
+ assert expected_n_splits == rskf.get_n_splits()
1197
+
1198
+
1199
+ def test_repeated_stratified_kfold_deterministic_split():
1200
+ X = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]]
1201
+ y = [1, 1, 1, 0, 0]
1202
+ random_state = 1944695409
1203
+ rskf = RepeatedStratifiedKFold(n_splits=2, n_repeats=2, random_state=random_state)
1204
+
1205
+ # split should produce same and deterministic splits on
1206
+ # each call
1207
+ for _ in range(3):
1208
+ splits = rskf.split(X, y)
1209
+ train, test = next(splits)
1210
+ assert_array_equal(train, [1, 4])
1211
+ assert_array_equal(test, [0, 2, 3])
1212
+
1213
+ train, test = next(splits)
1214
+ assert_array_equal(train, [0, 2, 3])
1215
+ assert_array_equal(test, [1, 4])
1216
+
1217
+ train, test = next(splits)
1218
+ assert_array_equal(train, [2, 3])
1219
+ assert_array_equal(test, [0, 1, 4])
1220
+
1221
+ train, test = next(splits)
1222
+ assert_array_equal(train, [0, 1, 4])
1223
+ assert_array_equal(test, [2, 3])
1224
+
1225
+ with pytest.raises(StopIteration):
1226
+ next(splits)
1227
+
1228
+
1229
+ def test_train_test_split_errors():
1230
+ pytest.raises(ValueError, train_test_split)
1231
+
1232
+ pytest.raises(ValueError, train_test_split, range(3), train_size=1.1)
1233
+
1234
+ pytest.raises(ValueError, train_test_split, range(3), test_size=0.6, train_size=0.6)
1235
+ pytest.raises(
1236
+ ValueError,
1237
+ train_test_split,
1238
+ range(3),
1239
+ test_size=np.float32(0.6),
1240
+ train_size=np.float32(0.6),
1241
+ )
1242
+ pytest.raises(ValueError, train_test_split, range(3), test_size="wrong_type")
1243
+ pytest.raises(ValueError, train_test_split, range(3), test_size=2, train_size=4)
1244
+ pytest.raises(TypeError, train_test_split, range(3), some_argument=1.1)
1245
+ pytest.raises(ValueError, train_test_split, range(3), range(42))
1246
+ pytest.raises(ValueError, train_test_split, range(10), shuffle=False, stratify=True)
1247
+
1248
+ with pytest.raises(
1249
+ ValueError,
1250
+ match=r"train_size=11 should be either positive and "
1251
+ r"smaller than the number of samples 10 or a "
1252
+ r"float in the \(0, 1\) range",
1253
+ ):
1254
+ train_test_split(range(10), train_size=11, test_size=1)
1255
+
1256
+
1257
+ @pytest.mark.parametrize(
1258
+ "train_size, exp_train, exp_test", [(None, 7, 3), (8, 8, 2), (0.8, 8, 2)]
1259
+ )
1260
+ def test_train_test_split_default_test_size(train_size, exp_train, exp_test):
1261
+ # Check that the default value has the expected behavior, i.e. the
+ # complement of train_size unless both are specified.
1263
+ X_train, X_test = train_test_split(X, train_size=train_size)
1264
+
1265
+ assert len(X_train) == exp_train
1266
+ assert len(X_test) == exp_test
1267
+
1268
+
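+ # The default sizing rule checked above, spelled out (sketch with made-up
+ # data): when both sizes are None, test_size falls back to 0.25, and when
+ # only train_size is given the test set is its complement.
+ def _demo_train_test_split_default_sizes():
+     X_demo = np.arange(8)
+     X_tr, X_te = train_test_split(X_demo, random_state=0)
+     assert (len(X_tr), len(X_te)) == (6, 2)  # ceil(0.25 * 8) == 2 test samples
+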
1269
+ @pytest.mark.parametrize(
1270
+ "array_namespace, device, dtype_name", yield_namespace_device_dtype_combinations()
1271
+ )
1272
+ @pytest.mark.parametrize(
1273
+ "shuffle,stratify",
1274
+ (
1275
+ (True, None),
1276
+ (True, np.hstack((np.ones(6), np.zeros(4)))),
1277
+ # stratification only works with shuffling
1278
+ (False, None),
1279
+ ),
1280
+ )
1281
+ def test_array_api_train_test_split(
1282
+ shuffle, stratify, array_namespace, device, dtype_name
1283
+ ):
1284
+ xp = _array_api_for_tests(array_namespace, device)
1285
+
1286
+ X = np.arange(100).reshape((10, 10))
1287
+ y = np.arange(10)
1288
+
1289
+ X_np = X.astype(dtype_name)
1290
+ X_xp = xp.asarray(X_np, device=device)
1291
+
1292
+ y_np = y.astype(dtype_name)
1293
+ y_xp = xp.asarray(y_np, device=device)
1294
+
1295
+ X_train_np, X_test_np, y_train_np, y_test_np = train_test_split(
1296
+ X_np, y, random_state=0, shuffle=shuffle, stratify=stratify
1297
+ )
1298
+ with config_context(array_api_dispatch=True):
1299
+ if stratify is not None:
1300
+ stratify_xp = xp.asarray(stratify)
1301
+ else:
1302
+ stratify_xp = stratify
1303
+ X_train_xp, X_test_xp, y_train_xp, y_test_xp = train_test_split(
1304
+ X_xp, y_xp, shuffle=shuffle, stratify=stratify_xp, random_state=0
1305
+ )
1306
+
1307
+ # Check that the namespace is preserved; this has to happen with
1308
+ # array_api_dispatch enabled.
1309
+ assert get_namespace(X_train_xp)[0] == get_namespace(X_xp)[0]
1310
+ assert get_namespace(X_test_xp)[0] == get_namespace(X_xp)[0]
1311
+ assert get_namespace(y_train_xp)[0] == get_namespace(y_xp)[0]
1312
+ assert get_namespace(y_test_xp)[0] == get_namespace(y_xp)[0]
1313
+
1314
+ # Check that device and dtype are preserved on output
1315
+ assert array_api_device(X_train_xp) == array_api_device(X_xp)
1316
+ assert array_api_device(y_train_xp) == array_api_device(y_xp)
1317
+ assert array_api_device(X_test_xp) == array_api_device(X_xp)
1318
+ assert array_api_device(y_test_xp) == array_api_device(y_xp)
1319
+
1320
+ assert X_train_xp.dtype == X_xp.dtype
1321
+ assert y_train_xp.dtype == y_xp.dtype
1322
+ assert X_test_xp.dtype == X_xp.dtype
1323
+ assert y_test_xp.dtype == y_xp.dtype
1324
+
1325
+ assert_allclose(
1326
+ _convert_to_numpy(X_train_xp, xp=xp),
1327
+ X_train_np,
1328
+ )
1329
+ assert_allclose(
1330
+ _convert_to_numpy(X_test_xp, xp=xp),
1331
+ X_test_np,
1332
+ )
1333
+
1334
+
1335
+ @pytest.mark.parametrize("coo_container", COO_CONTAINERS)
1336
+ def test_train_test_split(coo_container):
1337
+ X = np.arange(100).reshape((10, 10))
1338
+ X_s = coo_container(X)
1339
+ y = np.arange(10)
1340
+
1341
+ # simple test
1342
+ split = train_test_split(X, y, test_size=None, train_size=0.5)
1343
+ X_train, X_test, y_train, y_test = split
1344
+ assert len(y_test) == len(y_train)
1345
+ # test correspondence of X and y
1346
+ assert_array_equal(X_train[:, 0], y_train * 10)
1347
+ assert_array_equal(X_test[:, 0], y_test * 10)
1348
+
1349
+ # don't convert lists to anything else by default
1350
+ split = train_test_split(X, X_s, y.tolist())
1351
+ X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
1352
+ assert isinstance(y_train, list)
1353
+ assert isinstance(y_test, list)
1354
+
1355
+ # allow nd-arrays
1356
+ X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
1357
+ y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
1358
+ split = train_test_split(X_4d, y_3d)
1359
+ assert split[0].shape == (7, 5, 3, 2)
1360
+ assert split[1].shape == (3, 5, 3, 2)
1361
+ assert split[2].shape == (7, 7, 11)
1362
+ assert split[3].shape == (3, 7, 11)
1363
+
1364
+ # test stratification option
1365
+ y = np.array([1, 1, 1, 1, 2, 2, 2, 2])
1366
+ for test_size, exp_test_size in zip([2, 4, 0.25, 0.5, 0.75], [2, 4, 2, 4, 6]):
1367
+ train, test = train_test_split(
1368
+ y, test_size=test_size, stratify=y, random_state=0
1369
+ )
1370
+ assert len(test) == exp_test_size
1371
+ assert len(test) + len(train) == len(y)
1372
+ # check the 1:1 ratio of ones and twos in the data is preserved
1373
+ assert np.sum(train == 1) == np.sum(train == 2)
1374
+
1375
+ # test unshuffled split
1376
+ y = np.arange(10)
1377
+ for test_size in [2, 0.2]:
1378
+ train, test = train_test_split(y, shuffle=False, test_size=test_size)
1379
+ assert_array_equal(test, [8, 9])
1380
+ assert_array_equal(train, [0, 1, 2, 3, 4, 5, 6, 7])
1381
+
1382
+
1383
+ def test_train_test_split_32bit_overflow():
1384
+ """Check for integer overflow on 32-bit platforms.
1385
+
1386
+ Non-regression test for:
1387
+ https://github.com/scikit-learn/scikit-learn/issues/20774
1388
+ """
1389
+
1390
+ # A number 'n' big enough for the expression 'n * n * train_size' to
+ # overflow a signed 32-bit integer
1392
+ big_number = 100000
1393
+
1394
+ # The definition of 'y' is part of the reproduction: the population of
+ # at least one class should be of the same order of magnitude as the
+ # size of X
1396
+ X = np.arange(big_number)
1397
+ y = X > (0.99 * big_number)
1398
+
1399
+ split = train_test_split(X, y, stratify=y, train_size=0.25)
1400
+ X_train, X_test, y_train, y_test = split
1401
+
1402
+ assert X_train.size + X_test.size == big_number
1403
+ assert y_train.size + y_test.size == big_number
1404
+
1405
+
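+ # Worked arithmetic behind the regression: with big_number = 100000, an
+ # intermediate product on the order of n * n * train_size is
+ # 100000 * 100000 * 0.25 = 2.5e9, which exceeds the signed 32-bit maximum
+ # of 2**31 - 1 = 2147483647, hence the need for 64-bit index arithmetic.
+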
1406
+ @ignore_warnings
1407
+ def test_train_test_split_pandas():
1408
+ # check train_test_split doesn't destroy pandas dataframe
1409
+ types = [MockDataFrame]
1410
+ try:
1411
+ from pandas import DataFrame
1412
+
1413
+ types.append(DataFrame)
1414
+ except ImportError:
1415
+ pass
1416
+ for InputFeatureType in types:
1417
+ # X dataframe
1418
+ X_df = InputFeatureType(X)
1419
+ X_train, X_test = train_test_split(X_df)
1420
+ assert isinstance(X_train, InputFeatureType)
1421
+ assert isinstance(X_test, InputFeatureType)
1422
+
1423
+
1424
+ @pytest.mark.parametrize(
1425
+ "sparse_container", COO_CONTAINERS + CSC_CONTAINERS + CSR_CONTAINERS
1426
+ )
1427
+ def test_train_test_split_sparse(sparse_container):
1428
+ # check that train_test_split converts scipy sparse matrices
1429
+ # to csr, as stated in the documentation
1430
+ X = np.arange(100).reshape((10, 10))
1431
+ X_s = sparse_container(X)
1432
+ X_train, X_test = train_test_split(X_s)
1433
+ assert issparse(X_train) and X_train.format == "csr"
1434
+ assert issparse(X_test) and X_test.format == "csr"
1435
+
1436
+
1437
+ def test_train_test_split_mock_pandas():
1438
+ # X mock dataframe
1439
+ X_df = MockDataFrame(X)
1440
+ X_train, X_test = train_test_split(X_df)
1441
+ assert isinstance(X_train, MockDataFrame)
1442
+ assert isinstance(X_test, MockDataFrame)
1443
+ X_train_arr, X_test_arr = train_test_split(X_df)
1444
+
1445
+
1446
+ def test_train_test_split_list_input():
1447
+ # Check that when y is a list / list of string labels, it works.
1448
+ X = np.ones(7)
1449
+ y1 = ["1"] * 4 + ["0"] * 3
1450
+ y2 = np.hstack((np.ones(4), np.zeros(3)))
1451
+ y3 = y2.tolist()
1452
+
1453
+ for stratify in (True, False):
1454
+ X_train1, X_test1, y_train1, y_test1 = train_test_split(
1455
+ X, y1, stratify=y1 if stratify else None, random_state=0
1456
+ )
1457
+ X_train2, X_test2, y_train2, y_test2 = train_test_split(
1458
+ X, y2, stratify=y2 if stratify else None, random_state=0
1459
+ )
1460
+ X_train3, X_test3, y_train3, y_test3 = train_test_split(
1461
+ X, y3, stratify=y3 if stratify else None, random_state=0
1462
+ )
1463
+
1464
+ np.testing.assert_equal(X_train1, X_train2)
1465
+ np.testing.assert_equal(y_train2, y_train3)
1466
+ np.testing.assert_equal(X_test1, X_test3)
1467
+ np.testing.assert_equal(y_test3, y_test2)
1468
+
1469
+
1470
+ @pytest.mark.parametrize(
1471
+ "test_size, train_size",
1472
+ [(2.0, None), (1.0, None), (0.1, 0.95), (None, 1j), (11, None), (10, None), (8, 3)],
1473
+ )
1474
+ def test_shufflesplit_errors(test_size, train_size):
1475
+ with pytest.raises(ValueError):
1476
+ next(ShuffleSplit(test_size=test_size, train_size=train_size).split(X))
1477
+
1478
+
1479
+ def test_shufflesplit_reproducible():
1480
+ # Check that iterating twice on the ShuffleSplit gives the same
1481
+ # sequence of train-test when the random_state is given
1482
+ ss = ShuffleSplit(random_state=21)
1483
+ assert_array_equal([a for a, b in ss.split(X)], [a for a, b in ss.split(X)])
1484
+
1485
+
1486
+ def test_stratifiedshufflesplit_list_input():
1487
+ # Check that when y is a list / list of string labels, it works.
1488
+ sss = StratifiedShuffleSplit(test_size=2, random_state=42)
1489
+ X = np.ones(7)
1490
+ y1 = ["1"] * 4 + ["0"] * 3
1491
+ y2 = np.hstack((np.ones(4), np.zeros(3)))
1492
+ y3 = y2.tolist()
1493
+
1494
+ np.testing.assert_equal(list(sss.split(X, y1)), list(sss.split(X, y2)))
1495
+ np.testing.assert_equal(list(sss.split(X, y3)), list(sss.split(X, y2)))
1496
+
1497
+
1498
+ def test_train_test_split_allow_nans():
1499
+ # Check that train_test_split allows input data with NaNs
1500
+ X = np.arange(200, dtype=np.float64).reshape(10, -1)
1501
+ X[2, :] = np.nan
1502
+ y = np.repeat([0, 1], X.shape[0] / 2)
1503
+ train_test_split(X, y, test_size=0.2, random_state=42)
1504
+
1505
+
1506
+ def test_check_cv():
1507
+ X = np.ones(9)
1508
+ cv = check_cv(3, classifier=False)
1509
+ # Use numpy.testing.assert_equal which recursively compares
1510
+ # lists of lists
1511
+ np.testing.assert_equal(list(KFold(3).split(X)), list(cv.split(X)))
1512
+
1513
+ y_binary = np.array([0, 1, 0, 1, 0, 0, 1, 1, 1])
1514
+ cv = check_cv(3, y_binary, classifier=True)
1515
+ np.testing.assert_equal(
1516
+ list(StratifiedKFold(3).split(X, y_binary)), list(cv.split(X, y_binary))
1517
+ )
1518
+
1519
+ y_multiclass = np.array([0, 1, 0, 1, 2, 1, 2, 0, 2])
1520
+ cv = check_cv(3, y_multiclass, classifier=True)
1521
+ np.testing.assert_equal(
1522
+ list(StratifiedKFold(3).split(X, y_multiclass)), list(cv.split(X, y_multiclass))
1523
+ )
1524
+ # also works with 2d multiclass
1525
+ y_multiclass_2d = y_multiclass.reshape(-1, 1)
1526
+ cv = check_cv(3, y_multiclass_2d, classifier=True)
1527
+ np.testing.assert_equal(
1528
+ list(StratifiedKFold(3).split(X, y_multiclass_2d)),
1529
+ list(cv.split(X, y_multiclass_2d)),
1530
+ )
1531
+
1532
+ assert not np.all(
1533
+ next(StratifiedKFold(3).split(X, y_multiclass_2d))[0]
1534
+ == next(KFold(3).split(X, y_multiclass_2d))[0]
1535
+ )
1536
+
1537
+ X = np.ones(5)
1538
+ y_multilabel = np.array(
1539
+ [[0, 0, 0, 0], [0, 1, 1, 0], [0, 0, 0, 1], [1, 1, 0, 1], [0, 0, 1, 0]]
1540
+ )
1541
+ cv = check_cv(3, y_multilabel, classifier=True)
1542
+ np.testing.assert_equal(list(KFold(3).split(X)), list(cv.split(X)))
1543
+
1544
+ y_multioutput = np.array([[1, 2], [0, 3], [0, 0], [3, 1], [2, 0]])
1545
+ cv = check_cv(3, y_multioutput, classifier=True)
1546
+ np.testing.assert_equal(list(KFold(3).split(X)), list(cv.split(X)))
1547
+
1548
+ with pytest.raises(ValueError):
1549
+ check_cv(cv="lolo")
1550
+
1551
+
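+ # The dispatch rule exercised above, as a brief sketch: check_cv returns a
+ # StratifiedKFold when classifier=True and y is binary or multiclass, and a
+ # plain KFold otherwise (regression targets, multilabel, or classifier=False).
+ def _demo_check_cv_dispatch():
+     y_cls = np.array([0, 1, 0, 1, 0, 1])
+     assert isinstance(check_cv(3, y_cls, classifier=True), StratifiedKFold)
+     assert isinstance(check_cv(3, y_cls, classifier=False), KFold)
+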
1552
+ def test_cv_iterable_wrapper():
1553
+ kf_iter = KFold().split(X, y)
1554
+ kf_iter_wrapped = check_cv(kf_iter)
1555
+ # Since the wrapped iterable is converted to a list and stored,
1556
+ # split can be called any number of times to produce
1557
+ # consistent results.
1558
+ np.testing.assert_equal(
1559
+ list(kf_iter_wrapped.split(X, y)), list(kf_iter_wrapped.split(X, y))
1560
+ )
1561
+ # If the splits are randomized, successive calls to split yield different
1562
+ # results
1563
+ kf_randomized_iter = KFold(shuffle=True, random_state=0).split(X, y)
1564
+ kf_randomized_iter_wrapped = check_cv(kf_randomized_iter)
1565
+ # numpy's assert_equal properly compares nested lists
1566
+ np.testing.assert_equal(
1567
+ list(kf_randomized_iter_wrapped.split(X, y)),
1568
+ list(kf_randomized_iter_wrapped.split(X, y)),
1569
+ )
1570
+
1571
+ try:
1572
+ splits_are_equal = True
1573
+ np.testing.assert_equal(
1574
+ list(kf_iter_wrapped.split(X, y)),
1575
+ list(kf_randomized_iter_wrapped.split(X, y)),
1576
+ )
1577
+ except AssertionError:
1578
+ splits_are_equal = False
1579
+ assert not splits_are_equal, (
1580
+ "If the splits are randomized, "
1581
+ "successive calls to split should yield different results"
1582
+ )
1583
+
1584
+
1585
+ @pytest.mark.parametrize("kfold", [GroupKFold, StratifiedGroupKFold])
1586
+ def test_group_kfold(kfold):
1587
+ rng = np.random.RandomState(0)
1588
+
1589
+ # Parameters of the test
1590
+ n_groups = 15
1591
+ n_samples = 1000
1592
+ n_splits = 5
1593
+
1594
+ X = y = np.ones(n_samples)
1595
+
1596
+ # Construct the test data
1597
+ tolerance = 0.05 * n_samples # 5 percent error allowed
1598
+ groups = rng.randint(0, n_groups, n_samples)
1599
+
1600
+ ideal_n_groups_per_fold = n_samples // n_splits
1601
+
1602
1603
+ # Get the test fold indices from the test set indices of each fold
1604
+ folds = np.zeros(n_samples)
1605
+ lkf = kfold(n_splits=n_splits)
1606
+ for i, (_, test) in enumerate(lkf.split(X, y, groups)):
1607
+ folds[test] = i
1608
+
1609
+ # Check that folds have approximately the same size
1610
+ assert len(folds) == len(groups)
1611
+ for i in np.unique(folds):
1612
+ assert tolerance >= abs(sum(folds == i) - ideal_n_groups_per_fold)
1613
+
1614
+ # Check that each group appears only in 1 fold
1615
+ for group in np.unique(groups):
1616
+ assert len(np.unique(folds[groups == group])) == 1
1617
+
1618
+ # Check that no group is on both sides of the split
1619
+ groups = np.asarray(groups, dtype=object)
1620
+ for train, test in lkf.split(X, y, groups):
1621
+ assert len(np.intersect1d(groups[train], groups[test])) == 0
1622
+
1623
+ # Construct the test data
1624
+ groups = np.array(
1625
+ [
1626
+ "Albert",
1627
+ "Jean",
1628
+ "Bertrand",
1629
+ "Michel",
1630
+ "Jean",
1631
+ "Francis",
1632
+ "Robert",
1633
+ "Michel",
1634
+ "Rachel",
1635
+ "Lois",
1636
+ "Michelle",
1637
+ "Bernard",
1638
+ "Marion",
1639
+ "Laura",
1640
+ "Jean",
1641
+ "Rachel",
1642
+ "Franck",
1643
+ "John",
1644
+ "Gael",
1645
+ "Anna",
1646
+ "Alix",
1647
+ "Robert",
1648
+ "Marion",
1649
+ "David",
1650
+ "Tony",
1651
+ "Abel",
1652
+ "Becky",
1653
+ "Madmood",
1654
+ "Cary",
1655
+ "Mary",
1656
+ "Alexandre",
1657
+ "David",
1658
+ "Francis",
1659
+ "Barack",
1660
+ "Abdoul",
1661
+ "Rasha",
1662
+ "Xi",
1663
+ "Silvia",
1664
+ ]
1665
+ )
1666
+
1667
+ n_groups = len(np.unique(groups))
1668
+ n_samples = len(groups)
1669
+ n_splits = 5
1670
+ tolerance = 0.05 * n_samples # 5 percent error allowed
1671
+ ideal_n_groups_per_fold = n_samples // n_splits
1672
+
1673
+ X = y = np.ones(n_samples)
1674
+
1675
+ # Get the test fold indices from the test set indices of each fold
1676
+ folds = np.zeros(n_samples)
1677
+ for i, (_, test) in enumerate(lkf.split(X, y, groups)):
1678
+ folds[test] = i
1679
+
1680
+ # Check that folds have approximately the same size
1681
+ assert len(folds) == len(groups)
1682
+ for i in np.unique(folds):
1683
+ assert tolerance >= abs(sum(folds == i) - ideal_n_groups_per_fold)
1684
+
1685
+ # Check that each group appears only in 1 fold
1686
+ with warnings.catch_warnings():
1687
+ warnings.simplefilter("ignore", FutureWarning)
1688
+ for group in np.unique(groups):
1689
+ assert len(np.unique(folds[groups == group])) == 1
1690
+
1691
+ # Check that no group is on both sides of the split
1692
+ groups = np.asarray(groups, dtype=object)
1693
+ for train, test in lkf.split(X, y, groups):
1694
+ assert len(np.intersect1d(groups[train], groups[test])) == 0
1695
+
1696
+ # groups can also be a list
1697
+ cv_iter = list(lkf.split(X, y, groups.tolist()))
1698
+ for (train1, test1), (train2, test2) in zip(lkf.split(X, y, groups), cv_iter):
1699
+ assert_array_equal(train1, train2)
1700
+ assert_array_equal(test1, test2)
1701
+
1702
+ # Should fail if there are more folds than groups
1703
+ groups = np.array([1, 1, 1, 2, 2])
1704
+ X = y = np.ones(len(groups))
1705
+ with pytest.raises(ValueError, match="Cannot have number of splits.*greater"):
1706
+ next(GroupKFold(n_splits=3).split(X, y, groups))
1707
+
1708
+
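+ # A minimal, self-contained sketch of the invariant exercised above: with
+ # GroupKFold, a group id never lands on both sides of the same split. This
+ # assumes only the public GroupKFold API already used in this module.
+ def test_group_kfold_disjoint_groups_sketch():
+     X = y = np.ones(30)
+     groups = np.repeat([0, 1, 2, 3, 4, 5], 5)  # 6 groups, 5 samples each
+     for train, test in GroupKFold(n_splits=3).split(X, y, groups):
+         # the sets of group ids on the two sides must be disjoint
+         assert set(groups[train]).isdisjoint(groups[test])
+
+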
1709
+ def test_time_series_cv():
1710
+ X = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14]]
1711
+
1712
+ # Should fail if there are more folds than samples
1713
+ with pytest.raises(ValueError, match="Cannot have number of folds.*greater"):
1714
+ next(TimeSeriesSplit(n_splits=7).split(X))
1715
+
1716
+ tscv = TimeSeriesSplit(2)
1717
+
1718
+ # Manually check that Time Series CV preserves the data
1719
+ # ordering on toy datasets
1720
+ splits = tscv.split(X[:-1])
1721
+ train, test = next(splits)
1722
+ assert_array_equal(train, [0, 1])
1723
+ assert_array_equal(test, [2, 3])
1724
+
1725
+ train, test = next(splits)
1726
+ assert_array_equal(train, [0, 1, 2, 3])
1727
+ assert_array_equal(test, [4, 5])
1728
+
1729
+ splits = TimeSeriesSplit(2).split(X)
1730
+
1731
+ train, test = next(splits)
1732
+ assert_array_equal(train, [0, 1, 2])
1733
+ assert_array_equal(test, [3, 4])
1734
+
1735
+ train, test = next(splits)
1736
+ assert_array_equal(train, [0, 1, 2, 3, 4])
1737
+ assert_array_equal(test, [5, 6])
1738
+
1739
+ # Check get_n_splits returns the correct number of splits
1740
+ splits = TimeSeriesSplit(2).split(X)
1741
+ n_splits_actual = len(list(splits))
1742
+ assert n_splits_actual == tscv.get_n_splits()
1743
+ assert n_splits_actual == 2
1744
+
1745
+
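+ # A compact sketch of the expanding-window behaviour checked above: each
+ # TimeSeriesSplit training window is a prefix of the next one, and test
+ # indices always come strictly after the training indices.
+ def test_time_series_prefix_property_sketch():
+     X = np.zeros((8, 1))
+     prev_train = np.array([], dtype=int)
+     for train, test in TimeSeriesSplit(n_splits=3).split(X):
+         assert_array_equal(prev_train, train[: len(prev_train)])
+         assert train.max() < test.min()
+         prev_train = train
+
+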
1746
+ def _check_time_series_max_train_size(splits, check_splits, max_train_size):
1747
+ for (train, test), (check_train, check_test) in zip(splits, check_splits):
1748
+ assert_array_equal(test, check_test)
1749
+ assert len(check_train) <= max_train_size
1750
+ suffix_start = max(len(train) - max_train_size, 0)
1751
+ assert_array_equal(check_train, train[suffix_start:])
1752
+
1753
+
1754
+ def test_time_series_max_train_size():
1755
+ X = np.zeros((6, 1))
1756
+ splits = TimeSeriesSplit(n_splits=3).split(X)
1757
+ check_splits = TimeSeriesSplit(n_splits=3, max_train_size=3).split(X)
1758
+ _check_time_series_max_train_size(splits, check_splits, max_train_size=3)
1759
+
1760
+ # Test for the case where the size of a fold is greater than max_train_size
1761
+ check_splits = TimeSeriesSplit(n_splits=3, max_train_size=2).split(X)
1762
+ _check_time_series_max_train_size(splits, check_splits, max_train_size=2)
1763
+
1764
+ # Test for the case where the size of each fold is less than max_train_size
1765
+ check_splits = TimeSeriesSplit(n_splits=3, max_train_size=5).split(X)
1766
+ _check_time_series_max_train_size(splits, check_splits, max_train_size=2)
1767
+
1768
+
1769
+ def test_time_series_test_size():
1770
+ X = np.zeros((10, 1))
1771
+
1772
+ # Test alone
1773
+ splits = TimeSeriesSplit(n_splits=3, test_size=3).split(X)
1774
+
1775
+ train, test = next(splits)
1776
+ assert_array_equal(train, [0])
1777
+ assert_array_equal(test, [1, 2, 3])
1778
+
1779
+ train, test = next(splits)
1780
+ assert_array_equal(train, [0, 1, 2, 3])
1781
+ assert_array_equal(test, [4, 5, 6])
1782
+
1783
+ train, test = next(splits)
1784
+ assert_array_equal(train, [0, 1, 2, 3, 4, 5, 6])
1785
+ assert_array_equal(test, [7, 8, 9])
1786
+
1787
+ # Test with max_train_size
1788
+ splits = TimeSeriesSplit(n_splits=2, test_size=2, max_train_size=4).split(X)
1789
+
1790
+ train, test = next(splits)
1791
+ assert_array_equal(train, [2, 3, 4, 5])
1792
+ assert_array_equal(test, [6, 7])
1793
+
1794
+ train, test = next(splits)
1795
+ assert_array_equal(train, [4, 5, 6, 7])
1796
+ assert_array_equal(test, [8, 9])
1797
+
1798
+ # Should fail with not enough data points for configuration
1799
+ with pytest.raises(ValueError, match="Too many splits.*with test_size"):
1800
+ splits = TimeSeriesSplit(n_splits=5, test_size=2).split(X)
1801
+ next(splits)
1802
+
1803
+
1804
+ def test_time_series_gap():
1805
+ X = np.zeros((10, 1))
1806
+
1807
+ # Test alone
1808
+ splits = TimeSeriesSplit(n_splits=2, gap=2).split(X)
1809
+
1810
+ train, test = next(splits)
1811
+ assert_array_equal(train, [0, 1])
1812
+ assert_array_equal(test, [4, 5, 6])
1813
+
1814
+ train, test = next(splits)
1815
+ assert_array_equal(train, [0, 1, 2, 3, 4])
1816
+ assert_array_equal(test, [7, 8, 9])
1817
+
1818
+ # Test with max_train_size
1819
+ splits = TimeSeriesSplit(n_splits=3, gap=2, max_train_size=2).split(X)
1820
+
1821
+ train, test = next(splits)
1822
+ assert_array_equal(train, [0, 1])
1823
+ assert_array_equal(test, [4, 5])
1824
+
1825
+ train, test = next(splits)
1826
+ assert_array_equal(train, [2, 3])
1827
+ assert_array_equal(test, [6, 7])
1828
+
1829
+ train, test = next(splits)
1830
+ assert_array_equal(train, [4, 5])
1831
+ assert_array_equal(test, [8, 9])
1832
+
1833
+ # Test with test_size
1834
+ splits = TimeSeriesSplit(n_splits=2, gap=2, max_train_size=4, test_size=2).split(X)
1835
+
1836
+ train, test = next(splits)
1837
+ assert_array_equal(train, [0, 1, 2, 3])
1838
+ assert_array_equal(test, [6, 7])
1839
+
1840
+ train, test = next(splits)
1841
+ assert_array_equal(train, [2, 3, 4, 5])
1842
+ assert_array_equal(test, [8, 9])
1843
+
1844
+ # Test with additional test_size
1845
+ splits = TimeSeriesSplit(n_splits=2, gap=2, test_size=3).split(X)
1846
+
1847
+ train, test = next(splits)
1848
+ assert_array_equal(train, [0, 1])
1849
+ assert_array_equal(test, [4, 5, 6])
1850
+
1851
+ train, test = next(splits)
1852
+ assert_array_equal(train, [0, 1, 2, 3, 4])
1853
+ assert_array_equal(test, [7, 8, 9])
1854
+
1855
+ # Verify proper error is thrown
1856
+ with pytest.raises(ValueError, match="Too many splits.*and gap"):
1857
+ splits = TimeSeriesSplit(n_splits=4, gap=2).split(X)
1858
+ next(splits)
1859
+
1860
+
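+ # The index arithmetic behind the expected folds above, stated once as a
+ # sketch: with an explicit test_size, the i-th test block starts at
+ # n_samples - (n_splits - i) * test_size, and the training window stops
+ # `gap` samples before it (it would be further truncated by max_train_size).
+ def test_time_series_gap_arithmetic_sketch():
+     n_samples, n_splits, gap, test_size = 10, 2, 2, 3
+     X = np.zeros((n_samples, 1))
+     cv = TimeSeriesSplit(n_splits=n_splits, gap=gap, test_size=test_size)
+     for i, (train, test) in enumerate(cv.split(X)):
+         test_start = n_samples - (n_splits - i) * test_size
+         assert test[0] == test_start
+         assert train[-1] == test_start - gap - 1
+
+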
1861
+ def test_nested_cv():
1862
+ # Test if nested cross validation works with different combinations of cv
1863
+ rng = np.random.RandomState(0)
1864
+
1865
+ X, y = make_classification(n_samples=15, n_classes=2, random_state=0)
1866
+ groups = rng.randint(0, 5, 15)
1867
+
1868
+ cvs = [
1869
+ LeaveOneGroupOut(),
1870
+ StratifiedKFold(n_splits=2),
1871
+ LeaveOneOut(),
1872
+ GroupKFold(n_splits=3),
1873
+ StratifiedKFold(),
1874
+ StratifiedGroupKFold(),
1875
+ StratifiedShuffleSplit(n_splits=3, random_state=0),
1876
+ ]
1877
+
1878
+ for inner_cv, outer_cv in combinations_with_replacement(cvs, 2):
1879
+ gs = GridSearchCV(
1880
+ DummyClassifier(),
1881
+ param_grid={"strategy": ["stratified", "most_frequent"]},
1882
+ cv=inner_cv,
1883
+ error_score="raise",
1884
+ )
1885
+ cross_val_score(
1886
+ gs, X=X, y=y, groups=groups, cv=outer_cv, params={"groups": groups}
1887
+ )
1888
+
1889
+
1890
+ def test_build_repr():
1891
+ class MockSplitter:
1892
+ def __init__(self, a, b=0, c=None):
1893
+ self.a = a
1894
+ self.b = b
1895
+ self.c = c
1896
+
1897
+ def __repr__(self):
1898
+ return _build_repr(self)
1899
+
1900
+ assert repr(MockSplitter(5, 6)) == "MockSplitter(a=5, b=6, c=None)"
1901
+
1902
+
1903
+ @pytest.mark.parametrize(
1904
+ "CVSplitter", (ShuffleSplit, GroupShuffleSplit, StratifiedShuffleSplit)
1905
+ )
1906
+ def test_shuffle_split_empty_trainset(CVSplitter):
1907
+ cv = CVSplitter(test_size=0.99)
1908
+ X, y = [[1]], [0] # 1 sample
1909
+ with pytest.raises(
1910
+ ValueError,
1911
+ match=(
1912
+ "With n_samples=1, test_size=0.99 and train_size=None, "
1913
+ "the resulting train set will be empty"
1914
+ ),
1915
+ ):
1916
+ next(cv.split(X, y, groups=[1]))
1917
+
1918
+
1919
+ def test_train_test_split_empty_trainset():
1920
+ (X,) = [[1]] # 1 sample
1921
+ with pytest.raises(
1922
+ ValueError,
1923
+ match=(
1924
+ "With n_samples=1, test_size=0.99 and train_size=None, "
1925
+ "the resulting train set will be empty"
1926
+ ),
1927
+ ):
1928
+ train_test_split(X, test_size=0.99)
1929
+
1930
+ X = [[1], [1], [1]] # 3 samples, ask for more than 2 thirds
1931
+ with pytest.raises(
1932
+ ValueError,
1933
+ match=(
1934
+ "With n_samples=3, test_size=0.67 and train_size=None, "
1935
+ "the resulting train set will be empty"
1936
+ ),
1937
+ ):
1938
+ train_test_split(X, test_size=0.67)
1939
+
1940
+
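+ # A sketch of the rounding that produces the "empty train set" error above.
+ # I believe the test side is rounded up with ceil for a float test_size, so
+ # test_size=0.67 on 3 samples claims ceil(2.01) == 3 samples and leaves
+ # nothing for training (an assumption about the internal rounding rule).
+ def test_empty_trainset_rounding_sketch():
+     from math import ceil
+
+     n_samples, test_size = 3, 0.67
+     n_test = ceil(test_size * n_samples)
+     assert n_test == 3
+     assert n_samples - n_test == 0  # hence the error raised above
+
+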
1941
+ def test_leave_one_out_empty_trainset():
1942
+ # LeaveOneGroupOut expects at least 2 groups, so there is no need to check it
1943
+ cv = LeaveOneOut()
1944
+ X, y = [[1]], [0] # 1 sample
1945
+ with pytest.raises(ValueError, match="Cannot perform LeaveOneOut with n_samples=1"):
1946
+ next(cv.split(X, y))
1947
+
1948
+
1949
+ def test_leave_p_out_empty_trainset():
1950
+ # No need to check LeavePGroupsOut
1951
+ cv = LeavePOut(p=2)
1952
+ X, y = [[1], [2]], [0, 3] # 2 samples
1953
+ with pytest.raises(
1954
+ ValueError, match="p=2 must be strictly less than the number of samples=2"
1955
+ ):
1956
+ next(cv.split(X, y, groups=[1, 2]))
1957
+
1958
+
1959
+ @pytest.mark.parametrize("Klass", (KFold, StratifiedKFold, StratifiedGroupKFold))
1960
+ def test_random_state_shuffle_false(Klass):
1961
+ # passing a non-default random_state when shuffle=False makes no sense
1962
+ with pytest.raises(ValueError, match="has no effect since shuffle is False"):
1963
+ Klass(3, shuffle=False, random_state=0)
1964
+
1965
+
1966
+ @pytest.mark.parametrize(
1967
+ "cv, expected",
1968
+ [
1969
+ (KFold(), True),
1970
+ (KFold(shuffle=True, random_state=123), True),
1971
+ (StratifiedKFold(), True),
1972
+ (StratifiedKFold(shuffle=True, random_state=123), True),
1973
+ (StratifiedGroupKFold(shuffle=True, random_state=123), True),
1974
+ (StratifiedGroupKFold(), True),
1975
+ (RepeatedKFold(random_state=123), True),
1976
+ (RepeatedStratifiedKFold(random_state=123), True),
1977
+ (ShuffleSplit(random_state=123), True),
1978
+ (GroupShuffleSplit(random_state=123), True),
1979
+ (StratifiedShuffleSplit(random_state=123), True),
1980
+ (GroupKFold(), True),
1981
+ (TimeSeriesSplit(), True),
1982
+ (LeaveOneOut(), True),
1983
+ (LeaveOneGroupOut(), True),
1984
+ (LeavePGroupsOut(n_groups=2), True),
1985
+ (LeavePOut(p=2), True),
1986
+ (KFold(shuffle=True, random_state=None), False),
1987
+ (StratifiedKFold(shuffle=True, random_state=np.random.RandomState(0)), False),
1990
+ (RepeatedKFold(random_state=None), False),
1991
+ (RepeatedKFold(random_state=np.random.RandomState(0)), False),
1992
+ (RepeatedStratifiedKFold(random_state=None), False),
1993
+ (RepeatedStratifiedKFold(random_state=np.random.RandomState(0)), False),
1994
+ (ShuffleSplit(random_state=None), False),
1995
+ (ShuffleSplit(random_state=np.random.RandomState(0)), False),
1996
+ (GroupShuffleSplit(random_state=None), False),
1997
+ (GroupShuffleSplit(random_state=np.random.RandomState(0)), False),
1998
+ (StratifiedShuffleSplit(random_state=None), False),
1999
+ (StratifiedShuffleSplit(random_state=np.random.RandomState(0)), False),
2000
+ ],
2001
+ )
2002
+ def test_yields_constant_splits(cv, expected):
2003
+ assert _yields_constant_splits(cv) == expected
2004
+
2005
+
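+ # A direct sketch of what "constant splits" means in the table above: with
+ # an int random_state, calling split() twice yields identical folds, which
+ # is exactly what _yields_constant_splits is meant to predict.
+ def test_constant_splits_behaviour_sketch():
+     X = y = np.ones(12)
+     cv = KFold(shuffle=True, random_state=123)  # int seed => constant splits
+     for (tr1, te1), (tr2, te2) in zip(cv.split(X, y), cv.split(X, y)):
+         assert_array_equal(tr1, tr2)
+         assert_array_equal(te1, te2)
+
+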
2006
+ @pytest.mark.parametrize("cv", ALL_SPLITTERS, ids=[str(cv) for cv in ALL_SPLITTERS])
2007
+ def test_splitter_get_metadata_routing(cv):
2008
+ """Check get_metadata_routing returns the correct MetadataRouter."""
2009
+ assert hasattr(cv, "get_metadata_routing")
2010
+ metadata = cv.get_metadata_routing()
2011
+ if cv in GROUP_SPLITTERS:
2012
+ assert metadata.split.requests["groups"] is True
2013
+ elif cv in NO_GROUP_SPLITTERS:
2014
+ assert not metadata.split.requests
2015
+
2016
+ assert_request_is_empty(metadata, exclude=["split"])
2017
+
2018
+
2019
+ @pytest.mark.parametrize("cv", ALL_SPLITTERS, ids=[str(cv) for cv in ALL_SPLITTERS])
2020
+ def test_splitter_set_split_request(cv):
2021
+ """Check set_split_request is defined for group splitters and not for others."""
2022
+ if cv in GROUP_SPLITTERS:
2023
+ assert hasattr(cv, "set_split_request")
2024
+ elif cv in NO_GROUP_SPLITTERS:
2025
+ assert not hasattr(cv, "set_split_request")
env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/tests/test_successive_halving.py ADDED
@@ -0,0 +1,848 @@
1
+ from math import ceil
2
+
3
+ import numpy as np
4
+ import pytest
5
+ from scipy.stats import expon, norm, randint
6
+
7
+ from sklearn.datasets import make_classification
8
+ from sklearn.dummy import DummyClassifier
9
+ from sklearn.experimental import enable_halving_search_cv # noqa
10
+ from sklearn.model_selection import (
11
+ GroupKFold,
12
+ GroupShuffleSplit,
13
+ HalvingGridSearchCV,
14
+ HalvingRandomSearchCV,
15
+ KFold,
16
+ LeaveOneGroupOut,
17
+ LeavePGroupsOut,
18
+ ShuffleSplit,
19
+ StratifiedKFold,
20
+ StratifiedShuffleSplit,
21
+ )
22
+ from sklearn.model_selection._search_successive_halving import (
23
+ _SubsampleMetaSplitter,
24
+ _top_k,
25
+ )
26
+ from sklearn.model_selection.tests.test_search import (
27
+ check_cv_results_array_types,
28
+ check_cv_results_keys,
29
+ )
30
+ from sklearn.svm import SVC, LinearSVC
31
+
32
+
33
+ class FastClassifier(DummyClassifier):
34
+ """Dummy classifier that accepts parameters a, b, ... z.
35
+
36
+ These parameter don't affect the predictions and are useful for fast
37
+ grid searching."""
38
+
39
+ # update the constraints such that we accept all parameters from a to z
40
+ _parameter_constraints: dict = {
41
+ **DummyClassifier._parameter_constraints,
42
+ **{
43
+ chr(key): "no_validation" # type: ignore
44
+ for key in range(ord("a"), ord("z") + 1)
45
+ },
46
+ }
47
+
48
+ def __init__(
49
+ self, strategy="stratified", random_state=None, constant=None, **kwargs
50
+ ):
51
+ super().__init__(
52
+ strategy=strategy, random_state=random_state, constant=constant
53
+ )
54
+
55
+ def get_params(self, deep=False):
56
+ params = super().get_params(deep=deep)
57
+ for char in range(ord("a"), ord("z") + 1):
58
+ params[chr(char)] = "whatever"
59
+ return params
60
+
61
+
62
+ class SometimesFailClassifier(DummyClassifier):
63
+ def __init__(
64
+ self,
65
+ strategy="stratified",
66
+ random_state=None,
67
+ constant=None,
68
+ n_estimators=10,
69
+ fail_fit=False,
70
+ fail_predict=False,
71
+ a=0,
72
+ ):
73
+ self.fail_fit = fail_fit
74
+ self.fail_predict = fail_predict
75
+ self.n_estimators = n_estimators
76
+ self.a = a
77
+
78
+ super().__init__(
79
+ strategy=strategy, random_state=random_state, constant=constant
80
+ )
81
+
82
+ def fit(self, X, y):
83
+ if self.fail_fit:
84
+ raise Exception("fitting failed")
85
+ return super().fit(X, y)
86
+
87
+ def predict(self, X):
88
+ if self.fail_predict:
89
+ raise Exception("predict failed")
90
+ return super().predict(X)
91
+
92
+
93
+ @pytest.mark.filterwarnings("ignore::sklearn.exceptions.FitFailedWarning")
94
+ @pytest.mark.filterwarnings("ignore:Scoring failed:UserWarning")
95
+ @pytest.mark.filterwarnings("ignore:One or more of the:UserWarning")
96
+ @pytest.mark.parametrize("HalvingSearch", (HalvingGridSearchCV, HalvingRandomSearchCV))
97
+ @pytest.mark.parametrize("fail_at", ("fit", "predict"))
98
+ def test_nan_handling(HalvingSearch, fail_at):
99
+ """Check the selection of the best scores in presence of failure represented by
100
+ NaN values."""
101
+ n_samples = 1_000
102
+ X, y = make_classification(n_samples=n_samples, random_state=0)
103
+
104
+ search = HalvingSearch(
105
+ SometimesFailClassifier(),
106
+ {f"fail_{fail_at}": [False, True], "a": range(3)},
107
+ resource="n_estimators",
108
+ max_resources=6,
109
+ min_resources=1,
110
+ factor=2,
111
+ )
112
+
113
+ search.fit(X, y)
114
+
115
+ # estimators that failed during fit/predict should always rank lower
116
+ # than ones where the fit/predict succeeded
117
+ assert not search.best_params_[f"fail_{fail_at}"]
118
+ scores = search.cv_results_["mean_test_score"]
119
+ ranks = search.cv_results_["rank_test_score"]
120
+
121
+ # some scores should be NaN
122
+ assert np.isnan(scores).any()
123
+
124
+ unique_nan_ranks = np.unique(ranks[np.isnan(scores)])
125
+ # all NaN scores should have the same rank
126
+ assert unique_nan_ranks.shape[0] == 1
127
+ # NaNs should have the lowest rank
128
+ assert (unique_nan_ranks[0] >= ranks).all()
129
+
130
+
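+ # A sketch of the ranking convention asserted above: NaN scores are treated
+ # as the worst possible score, so every failed candidate shares the single
+ # highest (i.e. worst) rank. The rankdata-based reconstruction below is an
+ # assumption about the semantics, not a call into scikit-learn internals.
+ def test_nan_rank_convention_sketch():
+     from scipy.stats import rankdata
+
+     scores = np.array([0.9, np.nan, 0.7, np.nan])
+     # replace NaN by -inf before ranking so that failures rank last
+     filled = np.where(np.isnan(scores), -np.inf, scores)
+     ranks = rankdata(-filled, method="min")
+     assert list(ranks) == [1, 3, 2, 3]
+
+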
131
+ @pytest.mark.parametrize("Est", (HalvingGridSearchCV, HalvingRandomSearchCV))
132
+ @pytest.mark.parametrize(
133
+ (
134
+ "aggressive_elimination,"
135
+ "max_resources,"
136
+ "expected_n_iterations,"
137
+ "expected_n_required_iterations,"
138
+ "expected_n_possible_iterations,"
139
+ "expected_n_remaining_candidates,"
140
+ "expected_n_candidates,"
141
+ "expected_n_resources,"
142
+ ),
143
+ [
144
+ # notice how it loops at the beginning
145
+ # also, the number of candidates evaluated at the last iteration is
146
+ # <= factor
147
+ (True, "limited", 4, 4, 3, 1, [60, 20, 7, 3], [20, 20, 60, 180]),
148
+ # no aggressive elimination: we end up with fewer iterations, and
149
+ # the number of candidates at the last iter is > factor, which isn't
150
+ # ideal
151
+ (False, "limited", 3, 4, 3, 3, [60, 20, 7], [20, 60, 180]),
152
+ # When the amount of resources isn't limited, aggressive_elimination
153
+ # has no effect. Here the default min_resources='exhaust' will take
154
+ # over.
155
+ (True, "unlimited", 4, 4, 4, 1, [60, 20, 7, 3], [37, 111, 333, 999]),
156
+ (False, "unlimited", 4, 4, 4, 1, [60, 20, 7, 3], [37, 111, 333, 999]),
157
+ ],
158
+ )
159
+ def test_aggressive_elimination(
160
+ Est,
161
+ aggressive_elimination,
162
+ max_resources,
163
+ expected_n_iterations,
164
+ expected_n_required_iterations,
165
+ expected_n_possible_iterations,
166
+ expected_n_remaining_candidates,
167
+ expected_n_candidates,
168
+ expected_n_resources,
169
+ ):
170
+ # Test the aggressive_elimination parameter.
171
+
172
+ n_samples = 1000
173
+ X, y = make_classification(n_samples=n_samples, random_state=0)
174
+ param_grid = {"a": ("l1", "l2"), "b": list(range(30))}
175
+ base_estimator = FastClassifier()
176
+
177
+ if max_resources == "limited":
178
+ max_resources = 180
179
+ else:
180
+ max_resources = n_samples
181
+
182
+ sh = Est(
183
+ base_estimator,
184
+ param_grid,
185
+ aggressive_elimination=aggressive_elimination,
186
+ max_resources=max_resources,
187
+ factor=3,
188
+ )
189
+ sh.set_params(verbose=True) # just for test coverage
190
+
191
+ if Est is HalvingRandomSearchCV:
192
+ # same number of candidates as with the grid
193
+ sh.set_params(n_candidates=2 * 30, min_resources="exhaust")
194
+
195
+ sh.fit(X, y)
196
+
197
+ assert sh.n_iterations_ == expected_n_iterations
198
+ assert sh.n_required_iterations_ == expected_n_required_iterations
199
+ assert sh.n_possible_iterations_ == expected_n_possible_iterations
200
+ assert sh.n_resources_ == expected_n_resources
201
+ assert sh.n_candidates_ == expected_n_candidates
202
+ assert sh.n_remaining_candidates_ == expected_n_remaining_candidates
203
+ assert ceil(sh.n_candidates_[-1] / sh.factor) == sh.n_remaining_candidates_
204
+
205
+
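+ # The arithmetic behind expected_n_resources/expected_n_candidates above,
+ # as a sketch of the plain halving schedule (ignoring aggressive
+ # elimination and max_resources clipping): iteration i uses
+ # min_resources * factor**i resources and keeps ceil(n / factor**i)
+ # candidates.
+ def test_halving_schedule_arithmetic_sketch():
+     r0, factor, n_candidates = 20, 3, 60
+     resources = [r0 * factor**i for i in range(4)]
+     candidates = [ceil(n_candidates / factor**i) for i in range(4)]
+     assert resources == [20, 60, 180, 540]
+     assert candidates == [60, 20, 7, 3]
+
+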
206
+ @pytest.mark.parametrize("Est", (HalvingGridSearchCV, HalvingRandomSearchCV))
207
+ @pytest.mark.parametrize(
208
+ (
209
+ "min_resources,"
210
+ "max_resources,"
211
+ "expected_n_iterations,"
212
+ "expected_n_possible_iterations,"
213
+ "expected_n_resources,"
214
+ ),
215
+ [
216
+ # with enough resources
217
+ ("smallest", "auto", 2, 4, [20, 60]),
218
+ # with enough resources but min_resources set manually
219
+ (50, "auto", 2, 3, [50, 150]),
220
+ # without enough resources, only one iteration can be done
221
+ ("smallest", 30, 1, 1, [20]),
222
+ # with exhaust: use as much resources as possible at the last iter
223
+ ("exhaust", "auto", 2, 2, [333, 999]),
224
+ ("exhaust", 1000, 2, 2, [333, 999]),
225
+ ("exhaust", 999, 2, 2, [333, 999]),
226
+ ("exhaust", 600, 2, 2, [200, 600]),
227
+ ("exhaust", 599, 2, 2, [199, 597]),
228
+ ("exhaust", 300, 2, 2, [100, 300]),
229
+ ("exhaust", 60, 2, 2, [20, 60]),
230
+ ("exhaust", 50, 1, 1, [20]),
231
+ ("exhaust", 20, 1, 1, [20]),
232
+ ],
233
+ )
234
+ def test_min_max_resources(
235
+ Est,
236
+ min_resources,
237
+ max_resources,
238
+ expected_n_iterations,
239
+ expected_n_possible_iterations,
240
+ expected_n_resources,
241
+ ):
242
+ # Test the min_resources and max_resources parameters, and how they affect
243
+ # the number of resources used at each iteration
244
+ n_samples = 1000
245
+ X, y = make_classification(n_samples=n_samples, random_state=0)
246
+ param_grid = {"a": [1, 2], "b": [1, 2, 3]}
247
+ base_estimator = FastClassifier()
248
+
249
+ sh = Est(
250
+ base_estimator,
251
+ param_grid,
252
+ factor=3,
253
+ min_resources=min_resources,
254
+ max_resources=max_resources,
255
+ )
256
+ if Est is HalvingRandomSearchCV:
257
+ sh.set_params(n_candidates=6) # same number as with the grid
258
+
259
+ sh.fit(X, y)
260
+
261
+ expected_n_required_iterations = 2 # given 6 combinations and factor = 3
262
+ assert sh.n_iterations_ == expected_n_iterations
263
+ assert sh.n_required_iterations_ == expected_n_required_iterations
264
+ assert sh.n_possible_iterations_ == expected_n_possible_iterations
265
+ assert sh.n_resources_ == expected_n_resources
266
+ if min_resources == "exhaust":
267
+ assert sh.n_possible_iterations_ == sh.n_iterations_ == len(sh.n_resources_)
268
+
269
+
270
+ @pytest.mark.parametrize("Est", (HalvingRandomSearchCV, HalvingGridSearchCV))
271
+ @pytest.mark.parametrize(
272
+ "max_resources, n_iterations, n_possible_iterations",
273
+ [
274
+ ("auto", 5, 9), # all resources are used
275
+ (1024, 5, 9),
276
+ (700, 5, 8),
277
+ (512, 5, 8),
278
+ (511, 5, 7),
279
+ (32, 4, 4),
280
+ (31, 3, 3),
281
+ (16, 3, 3),
282
+ (4, 1, 1), # max_resources == min_resources, only one iteration is
283
+ # possible
284
+ ],
285
+ )
286
+ def test_n_iterations(Est, max_resources, n_iterations, n_possible_iterations):
287
+ # test the number of actual iterations that were run depending on
288
+ # max_resources
289
+
290
+ n_samples = 1024
291
+ X, y = make_classification(n_samples=n_samples, random_state=1)
292
+ param_grid = {"a": [1, 2], "b": list(range(10))}
293
+ base_estimator = FastClassifier()
294
+ factor = 2
295
+
296
+ sh = Est(
297
+ base_estimator,
298
+ param_grid,
299
+ cv=2,
300
+ factor=factor,
301
+ max_resources=max_resources,
302
+ min_resources=4,
303
+ )
304
+ if Est is HalvingRandomSearchCV:
305
+ sh.set_params(n_candidates=20) # same as for HalvingGridSearchCV
306
+ sh.fit(X, y)
307
+ assert sh.n_required_iterations_ == 5
308
+ assert sh.n_iterations_ == n_iterations
309
+ assert sh.n_possible_iterations_ == n_possible_iterations
310
+
311
+
312
+ @pytest.mark.parametrize("Est", (HalvingRandomSearchCV, HalvingGridSearchCV))
313
+ def test_resource_parameter(Est):
314
+ # Test the resource parameter
315
+
316
+ n_samples = 1000
317
+ X, y = make_classification(n_samples=n_samples, random_state=0)
318
+ param_grid = {"a": [1, 2], "b": list(range(10))}
319
+ base_estimator = FastClassifier()
320
+ sh = Est(base_estimator, param_grid, cv=2, resource="c", max_resources=10, factor=3)
321
+ sh.fit(X, y)
322
+ assert set(sh.n_resources_) == set([1, 3, 9])
323
+ for r_i, params, param_c in zip(
324
+ sh.cv_results_["n_resources"],
325
+ sh.cv_results_["params"],
326
+ sh.cv_results_["param_c"],
327
+ ):
328
+ assert r_i == params["c"] == param_c
329
+
330
+ with pytest.raises(
331
+ ValueError, match="Cannot use resource=1234 which is not supported "
332
+ ):
333
+ sh = HalvingGridSearchCV(
334
+ base_estimator, param_grid, cv=2, resource="1234", max_resources=10
335
+ )
336
+ sh.fit(X, y)
337
+
338
+ with pytest.raises(
339
+ ValueError,
340
+ match=(
341
+ "Cannot use parameter c as the resource since it is part "
342
+ "of the searched parameters."
343
+ ),
344
+ ):
345
+ param_grid = {"a": [1, 2], "b": [1, 2], "c": [1, 3]}
346
+ sh = HalvingGridSearchCV(
347
+ base_estimator, param_grid, cv=2, resource="c", max_resources=10
348
+ )
349
+ sh.fit(X, y)
350
+
351
+
352
+ @pytest.mark.parametrize(
353
+ "max_resources, n_candidates, expected_n_candidates",
354
+ [
355
+ (512, "exhaust", 128), # generate exactly as much as needed
356
+ (32, "exhaust", 8),
357
+ (32, 8, 8),
358
+ (32, 7, 7), # ask for less than what we could
359
+ (32, 9, 9), # ask for more than 'reasonable'
360
+ ],
361
+ )
362
+ def test_random_search(max_resources, n_candidates, expected_n_candidates):
363
+ # Test random search and make sure the number of generated candidates is
364
+ # as expected
365
+
366
+ n_samples = 1024
367
+ X, y = make_classification(n_samples=n_samples, random_state=0)
368
+ param_grid = {"a": norm, "b": norm}
369
+ base_estimator = FastClassifier()
370
+ sh = HalvingRandomSearchCV(
371
+ base_estimator,
372
+ param_grid,
373
+ n_candidates=n_candidates,
374
+ cv=2,
375
+ max_resources=max_resources,
376
+ factor=2,
377
+ min_resources=4,
378
+ )
379
+ sh.fit(X, y)
380
+ assert sh.n_candidates_[0] == expected_n_candidates
381
+ if n_candidates == "exhaust":
382
+ # Make sure 'exhaust' makes the last iteration use as much resources as
383
+ # we can
384
+ assert sh.n_resources_[-1] == max_resources
385
+
386
+
387
+ @pytest.mark.parametrize(
388
+ "param_distributions, expected_n_candidates",
389
+ [
390
+ ({"a": [1, 2]}, 2), # all lists, sample less than n_candidates
391
+ ({"a": randint(1, 3)}, 10), # not all list, respect n_candidates
392
+ ],
393
+ )
394
+ def test_random_search_discrete_distributions(
395
+ param_distributions, expected_n_candidates
396
+ ):
397
+ # Make sure random search samples the appropriate number of candidates when
398
+ # we ask for more than what's possible. How many parameters are sampled
399
+ # depends on whether the distributions are 'all lists' or not (see
400
+ # ParameterSampler for details). This is somewhat redundant with the checks
401
+ # in ParameterSampler, but interaction bugs were discovered during the
402
+ # development of successive halving.
403
+
404
+ n_samples = 1024
405
+ X, y = make_classification(n_samples=n_samples, random_state=0)
406
+ base_estimator = FastClassifier()
407
+ sh = HalvingRandomSearchCV(base_estimator, param_distributions, n_candidates=10)
408
+ sh.fit(X, y)
409
+ assert sh.n_candidates_[0] == expected_n_candidates
410
+
411
+
412
+ @pytest.mark.parametrize("Est", (HalvingGridSearchCV, HalvingRandomSearchCV))
413
+ @pytest.mark.parametrize(
414
+ "params, expected_error_message",
415
+ [
416
+ (
417
+ {"resource": "not_a_parameter"},
418
+ "Cannot use resource=not_a_parameter which is not supported",
419
+ ),
420
+ (
421
+ {"resource": "a", "max_resources": 100},
422
+ "Cannot use parameter a as the resource since it is part of",
423
+ ),
424
+ (
425
+ {"max_resources": "auto", "resource": "b"},
426
+ "resource can only be 'n_samples' when max_resources='auto'",
427
+ ),
428
+ (
429
+ {"min_resources": 15, "max_resources": 14},
430
+ "min_resources_=15 is greater than max_resources_=14",
431
+ ),
432
+ ({"cv": KFold(shuffle=True)}, "must yield consistent folds"),
433
+ ({"cv": ShuffleSplit()}, "must yield consistent folds"),
434
+ ],
435
+ )
436
+ def test_input_errors(Est, params, expected_error_message):
437
+ base_estimator = FastClassifier()
438
+ param_grid = {"a": [1]}
439
+ X, y = make_classification(100)
440
+
441
+ sh = Est(base_estimator, param_grid, **params)
442
+
443
+ with pytest.raises(ValueError, match=expected_error_message):
444
+ sh.fit(X, y)
445
+
446
+
447
+ @pytest.mark.parametrize(
448
+ "params, expected_error_message",
449
+ [
450
+ (
451
+ {"n_candidates": "exhaust", "min_resources": "exhaust"},
452
+ "cannot be both set to 'exhaust'",
453
+ ),
454
+ ],
455
+ )
456
+ def test_input_errors_randomized(params, expected_error_message):
457
+ # tests specific to HalvingRandomSearchCV
458
+
459
+ base_estimator = FastClassifier()
460
+ param_grid = {"a": [1]}
461
+ X, y = make_classification(100)
462
+
463
+ sh = HalvingRandomSearchCV(base_estimator, param_grid, **params)
464
+
465
+ with pytest.raises(ValueError, match=expected_error_message):
466
+ sh.fit(X, y)
467
+
468
+
469
+ @pytest.mark.parametrize(
470
+ "fraction, subsample_test, expected_train_size, expected_test_size",
471
+ [
472
+ (0.5, True, 40, 10),
473
+ (0.5, False, 40, 20),
474
+ (0.2, True, 16, 4),
475
+ (0.2, False, 16, 20),
476
+ ],
477
+ )
478
+ def test_subsample_splitter_shapes(
479
+ fraction, subsample_test, expected_train_size, expected_test_size
480
+ ):
481
+ # Make sure splits returned by SubsampleMetaSplitter are of appropriate
482
+ # size
483
+
484
+ n_samples = 100
485
+ X, y = make_classification(n_samples)
486
+ cv = _SubsampleMetaSplitter(
487
+ base_cv=KFold(5),
488
+ fraction=fraction,
489
+ subsample_test=subsample_test,
490
+ random_state=None,
491
+ )
492
+
493
+ for train, test in cv.split(X, y):
494
+ assert train.shape[0] == expected_train_size
495
+ assert test.shape[0] == expected_test_size
496
+ if subsample_test:
497
+ assert train.shape[0] + test.shape[0] == int(n_samples * fraction)
498
+ else:
499
+ assert test.shape[0] == n_samples // cv.base_cv.get_n_splits()
500
+
501
+
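+ # The size bookkeeping behind the parametrization above, as a sketch:
+ # KFold(5) on 100 samples yields 80 train / 20 test indices per split; the
+ # meta-splitter keeps `fraction` of the train side, and of the test side
+ # too when subsample_test=True.
+ def test_subsample_size_arithmetic_sketch():
+     n_samples, n_folds, fraction = 100, 5, 0.5
+     base_train = n_samples * (n_folds - 1) // n_folds
+     base_test = n_samples // n_folds
+     assert int(base_train * fraction) == 40
+     assert int(base_test * fraction) == 10
+
+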
502
+ @pytest.mark.parametrize("subsample_test", (True, False))
503
+ def test_subsample_splitter_determinism(subsample_test):
504
+ # Make sure _SubsampleMetaSplitter is consistent across calls to split():
505
+ # - we're OK having training sets differ (they're always sampled with a
506
+ # different fraction anyway)
507
+ # - when we don't subsample the test set, we want it to be always the same.
508
+ # This check is the most important. This is ensured by the determinism
509
+ # of the base_cv.
510
+
511
+ # Note: we could force both train and test splits to be always the same if
512
+ # we drew an int seed in _SubsampleMetaSplitter.__init__
513
+
514
+ n_samples = 100
515
+ X, y = make_classification(n_samples)
516
+ cv = _SubsampleMetaSplitter(
517
+ base_cv=KFold(5), fraction=0.5, subsample_test=subsample_test, random_state=None
518
+ )
519
+
520
+ folds_a = list(cv.split(X, y, groups=None))
521
+ folds_b = list(cv.split(X, y, groups=None))
522
+
523
+ for (train_a, test_a), (train_b, test_b) in zip(folds_a, folds_b):
524
+ assert not np.all(train_a == train_b)
525
+
526
+ if subsample_test:
527
+ assert not np.all(test_a == test_b)
528
+ else:
529
+ assert np.all(test_a == test_b)
530
+ assert np.all(X[test_a] == X[test_b])
531
+
532
+
533
+ @pytest.mark.parametrize(
534
+ "k, itr, expected",
535
+ [
536
+ (1, 0, ["c"]),
537
+ (2, 0, ["a", "c"]),
538
+ (4, 0, ["d", "b", "a", "c"]),
539
+ (10, 0, ["d", "b", "a", "c"]),
540
+ (1, 1, ["e"]),
541
+ (2, 1, ["f", "e"]),
542
+ (10, 1, ["f", "e"]),
543
+ (1, 2, ["i"]),
544
+ (10, 2, ["g", "h", "i"]),
545
+ ],
546
+ )
547
+ def test_top_k(k, itr, expected):
548
+ results = { # this isn't a 'real world' result dict
549
+ "iter": [0, 0, 0, 0, 1, 1, 2, 2, 2],
550
+ "mean_test_score": [4, 3, 5, 1, 11, 10, 5, 6, 9],
551
+ "params": ["a", "b", "c", "d", "e", "f", "g", "h", "i"],
552
+ }
553
+ got = _top_k(results, k=k, itr=itr)
554
+ assert np.all(got == expected)
555
+
556
+
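+ # A minimal reference sketch of the selection rule exercised above (an
+ # independent re-implementation, not a call into _top_k): restrict the
+ # results to one iteration, then keep the up-to-k best candidates by mean
+ # test score, returned worst-to-best.
+ def test_top_k_reference_sketch():
+     results = {
+         "iter": [0, 0, 0, 0],
+         "mean_test_score": [4, 3, 5, 1],
+         "params": ["a", "b", "c", "d"],
+     }
+     indices = [i for i, it in enumerate(results["iter"]) if it == 0]
+     ranked = sorted(indices, key=lambda i: results["mean_test_score"][i])
+     top_2 = [results["params"][i] for i in ranked[-2:]]
+     assert top_2 == ["a", "c"]  # matches the (k=2, itr=0) case above
+
+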
557
+ @pytest.mark.parametrize("Est", (HalvingRandomSearchCV, HalvingGridSearchCV))
558
+ def test_cv_results(Est):
559
+ # test that the cv_results_ matches correctly the logic of the
560
+ # tournament: in particular that the candidates continued in each
561
+ # successive iteration are those that were best in the previous iteration
562
+ pd = pytest.importorskip("pandas")
563
+
564
+ rng = np.random.RandomState(0)
565
+
566
+ n_samples = 1000
567
+ X, y = make_classification(n_samples=n_samples, random_state=0)
568
+ param_grid = {"a": ("l1", "l2"), "b": list(range(30))}
569
+ base_estimator = FastClassifier()
570
+
571
+ # generate random scores: we want to avoid ties, which would otherwise
572
+ # mess with the ordering and make testing harder
573
+ def scorer(est, X, y):
574
+ return rng.rand()
575
+
576
+ sh = Est(base_estimator, param_grid, factor=2, scoring=scorer)
577
+ if Est is HalvingRandomSearchCV:
578
+ # same number of candidates as with the grid
579
+ sh.set_params(n_candidates=2 * 30, min_resources="exhaust")
580
+
581
+ sh.fit(X, y)
582
+
583
+ # non-regression check for
584
+ # https://github.com/scikit-learn/scikit-learn/issues/19203
585
+ assert isinstance(sh.cv_results_["iter"], np.ndarray)
586
+ assert isinstance(sh.cv_results_["n_resources"], np.ndarray)
587
+
588
+ cv_results_df = pd.DataFrame(sh.cv_results_)
589
+
590
+ # just make sure we don't have ties
591
+ assert len(cv_results_df["mean_test_score"].unique()) == len(cv_results_df)
592
+
593
+ cv_results_df["params_str"] = cv_results_df["params"].apply(str)
594
+ table = cv_results_df.pivot(
595
+ index="params_str", columns="iter", values="mean_test_score"
596
+ )
597
+
598
+ # table looks like something like this:
599
+ # iter 0 1 2 3 4 5
600
+ # params_str
601
+ # {'a': 'l2', 'b': 23} 0.75 NaN NaN NaN NaN NaN
602
+ # {'a': 'l1', 'b': 30} 0.90 0.875 NaN NaN NaN NaN
603
+ # {'a': 'l1', 'b': 0} 0.75 NaN NaN NaN NaN NaN
604
+ # {'a': 'l2', 'b': 3} 0.85 0.925 0.9125 0.90625 NaN NaN
605
+ # {'a': 'l1', 'b': 5} 0.80 NaN NaN NaN NaN NaN
606
+ # ...
607
+
608
+ # where a NaN indicates that the candidate wasn't evaluated at a given
609
+ # iteration, because it wasn't part of the top-K at some previous
610
+ # iteration. We here make sure that candidates that aren't in the top-k at
611
+ # any given iteration are indeed not evaluated at the subsequent
612
+ # iterations.
613
+ nan_mask = pd.isna(table)
614
+ n_iter = sh.n_iterations_
615
+ for it in range(n_iter - 1):
616
+ already_discarded_mask = nan_mask[it]
617
+
618
+ # make sure that if a candidate is already discarded, we don't evaluate
619
+ # it later
620
+ assert (
621
+ already_discarded_mask & nan_mask[it + 1] == already_discarded_mask
622
+ ).all()
623
+
624
+ # make sure that the number of discarded candidates is correct
625
+ discarded_now_mask = ~already_discarded_mask & nan_mask[it + 1]
626
+ kept_mask = ~already_discarded_mask & ~discarded_now_mask
627
+ assert kept_mask.sum() == sh.n_candidates_[it + 1]
628
+
629
+ # make sure that all discarded candidates have a lower score than the
630
+ # kept candidates
631
+ discarded_max_score = table[it].where(discarded_now_mask).max()
632
+ kept_min_score = table[it].where(kept_mask).min()
633
+ assert discarded_max_score < kept_min_score
634
+
635
+ # We now make sure that the best candidate is chosen only from the last
636
+ # iteration.
637
+ # We also make sure this holds even though there were higher scores in
638
+ # earlier rounds (not guaranteed in general, but true here thanks to the
639
+ # random scores; the assertions below rely on it).
640
+
641
+ last_iter = cv_results_df["iter"].max()
642
+ idx_best_last_iter = cv_results_df[cv_results_df["iter"] == last_iter][
643
+ "mean_test_score"
644
+ ].idxmax()
645
+ idx_best_all_iters = cv_results_df["mean_test_score"].idxmax()
646
+
647
+ assert sh.best_params_ == cv_results_df.iloc[idx_best_last_iter]["params"]
648
+ assert (
649
+ cv_results_df.iloc[idx_best_last_iter]["mean_test_score"]
650
+ < cv_results_df.iloc[idx_best_all_iters]["mean_test_score"]
651
+ )
652
+ assert (
653
+ cv_results_df.iloc[idx_best_last_iter]["params"]
654
+ != cv_results_df.iloc[idx_best_all_iters]["params"]
655
+ )
656
+
657
+
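+ # A tiny sketch of the NaN pattern described in the comments above: once a
+ # candidate drops out at some iteration, its row stays NaN for every later
+ # iteration, so the NaN mask can only grow along each row.
+ def test_nan_mask_monotonicity_sketch():
+     pd = pytest.importorskip("pandas")
+
+     table = pd.DataFrame(
+         {0: [0.75, 0.90, 0.85], 1: [np.nan, 0.875, 0.925], 2: [np.nan, np.nan, 0.9]}
+     )
+     nan_mask = table.isna()
+     for it in range(2):
+         # a True (discarded) entry must stay True in the next column
+         assert (nan_mask[it] <= nan_mask[it + 1]).all()
+
+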
658
+ @pytest.mark.parametrize("Est", (HalvingGridSearchCV, HalvingRandomSearchCV))
659
+ def test_base_estimator_inputs(Est):
660
+ # make sure that the base estimators are passed the correct parameters and
661
+ # number of samples at each iteration.
662
+ pd = pytest.importorskip("pandas")
663
+
664
+ passed_n_samples_fit = []
665
+ passed_n_samples_predict = []
666
+ passed_params = []
667
+
668
+ class FastClassifierBookKeeping(FastClassifier):
669
+ def fit(self, X, y):
670
+ passed_n_samples_fit.append(X.shape[0])
671
+ return super().fit(X, y)
672
+
673
+ def predict(self, X):
674
+ passed_n_samples_predict.append(X.shape[0])
675
+ return super().predict(X)
676
+
677
+ def set_params(self, **params):
678
+ passed_params.append(params)
679
+ return super().set_params(**params)
680
+
681
+ n_samples = 1024
682
+ n_splits = 2
683
+ X, y = make_classification(n_samples=n_samples, random_state=0)
684
+ param_grid = {"a": ("l1", "l2"), "b": list(range(30))}
685
+ base_estimator = FastClassifierBookKeeping()
686
+
687
+ sh = Est(
688
+ base_estimator,
689
+ param_grid,
690
+ factor=2,
691
+ cv=n_splits,
692
+ return_train_score=False,
693
+ refit=False,
694
+ )
695
+ if Est is HalvingRandomSearchCV:
696
+ # same number of candidates as with the grid
697
+ sh.set_params(n_candidates=2 * 30, min_resources="exhaust")
698
+
699
+ sh.fit(X, y)
700
+
701
+ assert len(passed_n_samples_fit) == len(passed_n_samples_predict)
702
+ passed_n_samples = [
703
+ x + y for (x, y) in zip(passed_n_samples_fit, passed_n_samples_predict)
704
+ ]
705
+
706
+ # Lists are of length n_splits * n_iter * n_candidates_at_i.
707
+ # Each chunk of size n_splits corresponds to the n_splits folds for the
708
+ # same candidate at the same iteration, so they contain equal values. We
709
+ # subsample such that the lists are of length n_iter * n_candidates_at_i
710
+ passed_n_samples = passed_n_samples[::n_splits]
711
+ passed_params = passed_params[::n_splits]
712
+
713
+ cv_results_df = pd.DataFrame(sh.cv_results_)
714
+
715
+ assert len(passed_params) == len(passed_n_samples) == len(cv_results_df)
716
+
717
+ uniques, counts = np.unique(passed_n_samples, return_counts=True)
718
+ assert (sh.n_resources_ == uniques).all()
719
+ assert (sh.n_candidates_ == counts).all()
720
+
721
+ assert (cv_results_df["params"] == passed_params).all()
722
+ assert (cv_results_df["n_resources"] == passed_n_samples).all()
723
+
724
+
725
+ @pytest.mark.parametrize("Est", (HalvingGridSearchCV, HalvingRandomSearchCV))
726
+ def test_groups_support(Est):
727
+ # Check that the ValueError raised when groups is None propagates to
728
+ # HalvingGridSearchCV and HalvingRandomSearchCV.
729
+ # Also check that groups is correctly passed to the cv object.
730
+ rng = np.random.RandomState(0)
731
+
732
+ X, y = make_classification(n_samples=50, n_classes=2, random_state=0)
733
+ groups = rng.randint(0, 3, 50)
734
+
735
+ clf = LinearSVC(dual="auto", random_state=0)
736
+ grid = {"C": [1]}
737
+
738
+ group_cvs = [
739
+ LeaveOneGroupOut(),
740
+ LeavePGroupsOut(2),
741
+ GroupKFold(n_splits=3),
742
+ GroupShuffleSplit(random_state=0),
743
+ ]
744
+ error_msg = "The 'groups' parameter should not be None."
745
+ for cv in group_cvs:
746
+ gs = Est(clf, grid, cv=cv, random_state=0)
747
+ with pytest.raises(ValueError, match=error_msg):
748
+ gs.fit(X, y)
749
+ gs.fit(X, y, groups=groups)
750
+
751
+ non_group_cvs = [StratifiedKFold(), StratifiedShuffleSplit(random_state=0)]
752
+ for cv in non_group_cvs:
753
+ gs = Est(clf, grid, cv=cv)
754
+ # Should not raise an error
755
+ gs.fit(X, y)
756
+
757
+
758
+ @pytest.mark.parametrize("SearchCV", [HalvingRandomSearchCV, HalvingGridSearchCV])
759
+ def test_min_resources_null(SearchCV):
760
+ """Check that we raise an error if the minimum resources is set to 0."""
761
+ base_estimator = FastClassifier()
762
+ param_grid = {"a": [1]}
763
+ X = np.empty(0).reshape(0, 3)
764
+
765
+ search = SearchCV(base_estimator, param_grid, min_resources="smallest")
766
+
767
+ err_msg = "min_resources_=0: you might have passed an empty dataset X."
768
+ with pytest.raises(ValueError, match=err_msg):
769
+ search.fit(X, [])
770
+
771
+
772
+ @pytest.mark.parametrize("SearchCV", [HalvingGridSearchCV, HalvingRandomSearchCV])
773
+ def test_select_best_index(SearchCV):
774
+ """Check the selection strategy of the halving search."""
775
+ results = { # this isn't a 'real world' result dict
776
+ "iter": np.array([0, 0, 0, 0, 1, 1, 2, 2, 2]),
777
+ "mean_test_score": np.array([4, 3, 5, 1, 11, 10, 5, 6, 9]),
778
+ "params": np.array(["a", "b", "c", "d", "e", "f", "g", "h", "i"]),
779
+ }
780
+
781
+ # we expect the index of 'i'
782
+ best_index = SearchCV._select_best_index(None, None, results)
783
+ assert best_index == 8
784
+
785
+
786
+ def test_halving_random_search_list_of_dicts():
787
+ """Check the behaviour of the `HalvingRandomSearchCV` with `param_distribution`
788
+ being a list of dictionary.
789
+ """
790
+ X, y = make_classification(n_samples=150, n_features=4, random_state=42)
791
+
792
+ params = [
793
+ {"kernel": ["rbf"], "C": expon(scale=10), "gamma": expon(scale=0.1)},
794
+ {"kernel": ["poly"], "degree": [2, 3]},
795
+ ]
796
+ param_keys = (
797
+ "param_C",
798
+ "param_degree",
799
+ "param_gamma",
800
+ "param_kernel",
801
+ )
802
+ score_keys = (
803
+ "mean_test_score",
804
+ "mean_train_score",
805
+ "rank_test_score",
806
+ "split0_test_score",
807
+ "split1_test_score",
808
+ "split2_test_score",
809
+ "split0_train_score",
810
+ "split1_train_score",
811
+ "split2_train_score",
812
+ "std_test_score",
813
+ "std_train_score",
814
+ "mean_fit_time",
815
+ "std_fit_time",
816
+ "mean_score_time",
817
+ "std_score_time",
818
+ )
819
+ extra_keys = ("n_resources", "iter")
820
+
821
+ search = HalvingRandomSearchCV(
822
+ SVC(), cv=3, param_distributions=params, return_train_score=True, random_state=0
823
+ )
824
+ search.fit(X, y)
825
+ n_candidates = sum(search.n_candidates_)
826
+ cv_results = search.cv_results_
827
+ # Check results structure
828
+ check_cv_results_keys(cv_results, param_keys, score_keys, n_candidates, extra_keys)
829
+ check_cv_results_array_types(search, param_keys, score_keys)
830
+
831
+ assert all(
832
+ (
833
+ cv_results["param_C"].mask[i]
834
+ and cv_results["param_gamma"].mask[i]
835
+ and not cv_results["param_degree"].mask[i]
836
+ )
837
+ for i in range(n_candidates)
838
+ if cv_results["param_kernel"][i] == "poly"
839
+ )
840
+ assert all(
841
+ (
842
+ not cv_results["param_C"].mask[i]
843
+ and not cv_results["param_gamma"].mask[i]
844
+ and cv_results["param_degree"].mask[i]
845
+ )
846
+ for i in range(n_candidates)
847
+ if cv_results["param_kernel"][i] == "rbf"
848
+ )
env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/tests/test_validation.py ADDED
@@ -0,0 +1,2621 @@
1
+ """Test the validation module"""
2
+ import os
3
+ import re
4
+ import sys
5
+ import tempfile
6
+ import warnings
7
+ from functools import partial
8
+ from io import StringIO
9
+ from time import sleep
10
+
11
+ import numpy as np
12
+ import pytest
13
+ from scipy.sparse import issparse
14
+
15
+ from sklearn.base import BaseEstimator, clone
16
+ from sklearn.cluster import KMeans
17
+ from sklearn.datasets import (
18
+ load_diabetes,
19
+ load_digits,
20
+ load_iris,
21
+ make_classification,
22
+ make_multilabel_classification,
23
+ make_regression,
24
+ )
25
+ from sklearn.ensemble import RandomForestClassifier
26
+ from sklearn.exceptions import FitFailedWarning
27
+ from sklearn.impute import SimpleImputer
28
+ from sklearn.linear_model import (
29
+ LogisticRegression,
30
+ PassiveAggressiveClassifier,
31
+ Ridge,
32
+ RidgeClassifier,
33
+ SGDClassifier,
34
+ )
35
+ from sklearn.metrics import (
36
+ accuracy_score,
37
+ check_scoring,
38
+ confusion_matrix,
39
+ explained_variance_score,
40
+ make_scorer,
41
+ mean_squared_error,
42
+ precision_recall_fscore_support,
43
+ precision_score,
44
+ r2_score,
45
+ )
46
+ from sklearn.model_selection import (
47
+ GridSearchCV,
48
+ GroupKFold,
49
+ GroupShuffleSplit,
50
+ KFold,
51
+ LeaveOneGroupOut,
52
+ LeaveOneOut,
53
+ LeavePGroupsOut,
54
+ ShuffleSplit,
55
+ StratifiedKFold,
56
+ cross_val_predict,
57
+ cross_val_score,
58
+ cross_validate,
59
+ learning_curve,
60
+ permutation_test_score,
61
+ validation_curve,
62
+ )
63
+ from sklearn.model_selection._validation import (
64
+ _check_is_permutation,
65
+ _fit_and_score,
66
+ _score,
67
+ )
68
+ from sklearn.model_selection.tests.common import OneTimeSplitter
69
+ from sklearn.model_selection.tests.test_search import FailingClassifier
70
+ from sklearn.multiclass import OneVsRestClassifier
71
+ from sklearn.neighbors import KNeighborsClassifier
72
+ from sklearn.neural_network import MLPRegressor
73
+ from sklearn.pipeline import Pipeline
74
+ from sklearn.preprocessing import LabelEncoder, scale
75
+ from sklearn.svm import SVC, LinearSVC
76
+ from sklearn.tests.metadata_routing_common import (
77
+ ConsumingClassifier,
78
+ ConsumingScorer,
79
+ ConsumingSplitter,
80
+ _Registry,
81
+ check_recorded_metadata,
82
+ )
83
+ from sklearn.utils import shuffle
84
+ from sklearn.utils._mocking import CheckingClassifier, MockDataFrame
85
+ from sklearn.utils._testing import (
86
+ assert_allclose,
87
+ assert_almost_equal,
88
+ assert_array_almost_equal,
89
+ assert_array_equal,
90
+ )
91
+ from sklearn.utils.fixes import COO_CONTAINERS, CSR_CONTAINERS
92
+ from sklearn.utils.validation import _num_samples
93
+
94
+
95
+ class MockImprovingEstimator(BaseEstimator):
96
+ """Dummy classifier to test the learning curve"""
97
+
98
+ def __init__(self, n_max_train_sizes):
99
+ self.n_max_train_sizes = n_max_train_sizes
100
+ self.train_sizes = 0
101
+ self.X_subset = None
102
+
103
+ def fit(self, X_subset, y_subset=None):
104
+ self.X_subset = X_subset
105
+ self.train_sizes = X_subset.shape[0]
106
+ return self
107
+
108
+ def predict(self, X):
109
+ raise NotImplementedError
110
+
111
+ def score(self, X=None, Y=None):
112
+ # training score becomes worse (2 -> 1), test error better (0 -> 1)
113
+ if self._is_training_data(X):
114
+ return 2.0 - float(self.train_sizes) / self.n_max_train_sizes
115
+ else:
116
+ return float(self.train_sizes) / self.n_max_train_sizes
117
+
118
+ def _is_training_data(self, X):
119
+ return X is self.X_subset
120
+
121
+
122
+ class MockIncrementalImprovingEstimator(MockImprovingEstimator):
123
+ """Dummy classifier that provides partial_fit"""
124
+
125
+ def __init__(self, n_max_train_sizes, expected_fit_params=None):
126
+ super().__init__(n_max_train_sizes)
127
+ self.x = None
128
+ self.expected_fit_params = expected_fit_params
129
+
130
+ def _is_training_data(self, X):
131
+ return self.x in X
132
+
133
+ def partial_fit(self, X, y=None, **params):
134
+ self.train_sizes += X.shape[0]
135
+ self.x = X[0]
136
+ if self.expected_fit_params:
137
+ missing = set(self.expected_fit_params) - set(params)
138
+ if missing:
139
+ raise AssertionError(
140
+ f"Expected fit parameter(s) {list(missing)} not seen."
141
+ )
142
+ for key, value in params.items():
143
+ if key in self.expected_fit_params and _num_samples(
144
+ value
145
+ ) != _num_samples(X):
146
+ raise AssertionError(
147
+ f"Fit parameter {key} has length {_num_samples(value)}"
148
+ f"; expected {_num_samples(X)}."
149
+ )
150
+
151
+
152
+ class MockEstimatorWithParameter(BaseEstimator):
+     """Dummy classifier to test the validation curve"""
+
+     def __init__(self, param=0.5):
+         self.X_subset = None
+         self.param = param
+
+     def fit(self, X_subset, y_subset):
+         self.X_subset = X_subset
+         self.train_sizes = X_subset.shape[0]
+         return self
+
+     def predict(self, X):
+         raise NotImplementedError
+
+     def score(self, X=None, y=None):
+         return self.param if self._is_training_data(X) else 1 - self.param
+
+     def _is_training_data(self, X):
+         return X is self.X_subset
+
+
+ class MockEstimatorWithSingleFitCallAllowed(MockEstimatorWithParameter):
+     """Dummy classifier that disallows repeated calls of the fit method"""
+
+     def fit(self, X_subset, y_subset):
+         assert not hasattr(self, "fit_called_"), "fit was called a second time"
+         self.fit_called_ = True
+         return super().fit(X_subset, y_subset)
+
+     def predict(self, X):
+         raise NotImplementedError
+
+
+ class MockClassifier:
+     """Dummy classifier to test cross-validation"""
+
+     def __init__(self, a=0, allow_nd=False):
+         self.a = a
+         self.allow_nd = allow_nd
+
+     def fit(
+         self,
+         X,
+         Y=None,
+         sample_weight=None,
+         class_prior=None,
+         sparse_sample_weight=None,
+         sparse_param=None,
+         dummy_int=None,
+         dummy_str=None,
+         dummy_obj=None,
+         callback=None,
+     ):
+         """The dummy arguments are to test that this fit function can
+         accept non-array arguments through cross-validation, such as:
+             - int
+             - str (this is actually array-like)
+             - object
+             - function
+         """
+         self.dummy_int = dummy_int
+         self.dummy_str = dummy_str
+         self.dummy_obj = dummy_obj
+         if callback is not None:
+             callback(self)
+
+         if self.allow_nd:
+             X = X.reshape(len(X), -1)
+         if X.ndim >= 3 and not self.allow_nd:
+             raise ValueError("X cannot be 3d")
+         if sample_weight is not None:
+             assert sample_weight.shape[0] == X.shape[0], (
+                 "MockClassifier extra fit_param "
+                 "sample_weight.shape[0] is {0}, should be {1}".format(
+                     sample_weight.shape[0], X.shape[0]
+                 )
+             )
+         if class_prior is not None:
+             assert class_prior.shape[0] == len(np.unique(y)), (
+                 "MockClassifier extra fit_param class_prior.shape[0]"
+                 " is {0}, should be {1}".format(class_prior.shape[0], len(np.unique(y)))
+             )
+         if sparse_sample_weight is not None:
+             fmt = (
+                 "MockClassifier extra fit_param sparse_sample_weight"
+                 ".shape[0] is {0}, should be {1}"
+             )
+             assert sparse_sample_weight.shape[0] == X.shape[0], fmt.format(
+                 sparse_sample_weight.shape[0], X.shape[0]
+             )
+         if sparse_param is not None:
+             fmt = (
+                 "MockClassifier extra fit_param sparse_param.shape "
+                 "is ({0}, {1}), should be ({2}, {3})"
+             )
+             assert sparse_param.shape == P.shape, fmt.format(
+                 sparse_param.shape[0],
+                 sparse_param.shape[1],
+                 P.shape[0],
+                 P.shape[1],
+             )
+         return self
+
+     def predict(self, T):
+         if self.allow_nd:
+             T = T.reshape(len(T), -1)
+         return T[:, 0]
+
+     def predict_proba(self, T):
+         return T
+
+     def score(self, X=None, Y=None):
+         return 1.0 / (1 + np.abs(self.a))
+
+     def get_params(self, deep=False):
+         return {"a": self.a, "allow_nd": self.allow_nd}
+
+
+ # XXX: use a 2D array, since 1D X would be detected as a single sample in
+ # check_consistent_length
+ X = np.ones((10, 2))
+ y = np.array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4])
+ # The number of samples per class needs to be > n_splits,
+ # for StratifiedKFold(n_splits=3)
+ y2 = np.array([1, 1, 1, 2, 2, 2, 3, 3, 3, 3])
+ P = np.eye(5)
+
+
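+ # Editor's note, with a minimal sketch (not part of the upstream test file):
+ # StratifiedKFold refuses to split a class that has fewer members than
+ # n_splits, which is why ``y2`` above keeps at least 3 samples per class
+ # while ``y`` (2 samples per class) would break the default stratified CV.
+ def _sketch_stratified_kfold_min_class_size():
+     from sklearn.model_selection import StratifiedKFold
+
+     skf = StratifiedKFold(n_splits=3)
+     try:
+         list(skf.split(X, y))  # only 2 members per class in ``y``
+     except ValueError as exc:
+         return str(exc)  # "n_splits=3 cannot be greater than ..."
+
+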
+ @pytest.mark.parametrize("coo_container", COO_CONTAINERS)
282
+ def test_cross_val_score(coo_container):
283
+ clf = MockClassifier()
284
+ X_sparse = coo_container(X)
285
+
286
+ for a in range(-10, 10):
287
+ clf.a = a
288
+ # Smoke test
289
+ scores = cross_val_score(clf, X, y2)
290
+ assert_array_equal(scores, clf.score(X, y2))
291
+
292
+ # test with multioutput y
293
+ multioutput_y = np.column_stack([y2, y2[::-1]])
294
+ scores = cross_val_score(clf, X_sparse, multioutput_y)
295
+ assert_array_equal(scores, clf.score(X_sparse, multioutput_y))
296
+
297
+ scores = cross_val_score(clf, X_sparse, y2)
298
+ assert_array_equal(scores, clf.score(X_sparse, y2))
299
+
300
+ # test with multioutput y
301
+ scores = cross_val_score(clf, X_sparse, multioutput_y)
302
+ assert_array_equal(scores, clf.score(X_sparse, multioutput_y))
303
+
304
+ # test with X and y as list
305
+ list_check = lambda x: isinstance(x, list)
306
+ clf = CheckingClassifier(check_X=list_check)
307
+ scores = cross_val_score(clf, X.tolist(), y2.tolist(), cv=3)
308
+
309
+ clf = CheckingClassifier(check_y=list_check)
310
+ scores = cross_val_score(clf, X, y2.tolist(), cv=3)
311
+
312
+ # test with 3d X and
313
+ X_3d = X[:, :, np.newaxis]
314
+ clf = MockClassifier(allow_nd=True)
315
+ scores = cross_val_score(clf, X_3d, y2)
316
+
317
+ clf = MockClassifier(allow_nd=False)
318
+ with pytest.raises(ValueError):
319
+ cross_val_score(clf, X_3d, y2, error_score="raise")
320
+
321
+
322
+ def test_cross_validate_many_jobs():
323
+ # regression test for #12154: cv='warn' with n_jobs>1 trigger a copy of
324
+ # the parameters leading to a failure in check_cv due to cv is 'warn'
325
+ # instead of cv == 'warn'.
326
+ X, y = load_iris(return_X_y=True)
327
+ clf = SVC(gamma="auto")
328
+ grid = GridSearchCV(clf, param_grid={"C": [1, 10]})
329
+ cross_validate(grid, X, y, n_jobs=2)
330
+
331
+
332
+ def test_cross_validate_invalid_scoring_param():
+     X, y = make_classification(random_state=0)
+     estimator = MockClassifier()
+
+     # Test the errors
+     error_message_regexp = ".*must be unique strings.*"
+
+     # A list/tuple of callables should raise a message advising users to use
+     # a dict mapping names to callables
+     with pytest.raises(ValueError, match=error_message_regexp):
+         cross_validate(
+             estimator,
+             X,
+             y,
+             scoring=(make_scorer(precision_score), make_scorer(accuracy_score)),
+         )
+     with pytest.raises(ValueError, match=error_message_regexp):
+         cross_validate(estimator, X, y, scoring=(make_scorer(precision_score),))
+
+     # So should empty lists/tuples
+     with pytest.raises(ValueError, match=error_message_regexp + "Empty list.*"):
+         cross_validate(estimator, X, y, scoring=())
+
+     # So should duplicated entries
+     with pytest.raises(ValueError, match=error_message_regexp + "Duplicate.*"):
+         cross_validate(estimator, X, y, scoring=("f1_micro", "f1_micro"))
+
+     # Nested lists should raise a generic error message
+     with pytest.raises(ValueError, match=error_message_regexp):
+         cross_validate(estimator, X, y, scoring=[[make_scorer(precision_score)]])
+
+     # An empty dict should raise an invalid scoring error
+     with pytest.raises(ValueError, match="An empty dict"):
+         cross_validate(estimator, X, y, scoring=(dict()))
+
+     multiclass_scorer = make_scorer(precision_recall_fscore_support)
+
+     # Multiclass scorers that return multiple values are not supported yet;
+     # this is the warning message we expect to see
+     warning_message = (
+         "Scoring failed. The score on this train-test "
+         f"partition for these parameters will be set to {np.nan}. "
+         "Details: \n"
+     )
+
+     with pytest.warns(UserWarning, match=warning_message):
+         cross_validate(estimator, X, y, scoring=multiclass_scorer)
+
+     with pytest.warns(UserWarning, match=warning_message):
+         cross_validate(estimator, X, y, scoring={"foo": multiclass_scorer})
+
+
+ def test_cross_validate_nested_estimator():
+     # Non-regression test to ensure that nested
+     # estimators are properly returned in a list
+     # https://github.com/scikit-learn/scikit-learn/pull/17745
+     (X, y) = load_iris(return_X_y=True)
+     pipeline = Pipeline(
+         [
+             ("imputer", SimpleImputer()),
+             ("classifier", MockClassifier()),
+         ]
+     )
+
+     results = cross_validate(pipeline, X, y, return_estimator=True)
+     estimators = results["estimator"]
+
+     assert isinstance(estimators, list)
+     assert all(isinstance(estimator, Pipeline) for estimator in estimators)
+
+
+ @pytest.mark.parametrize("use_sparse", [False, True])
404
+ @pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
405
+ def test_cross_validate(use_sparse: bool, csr_container):
406
+ # Compute train and test mse/r2 scores
407
+ cv = KFold()
408
+
409
+ # Regression
410
+ X_reg, y_reg = make_regression(n_samples=30, random_state=0)
411
+ reg = Ridge(random_state=0)
412
+
413
+ # Classification
414
+ X_clf, y_clf = make_classification(n_samples=30, random_state=0)
415
+ clf = SVC(kernel="linear", random_state=0)
416
+
417
+ if use_sparse:
418
+ X_reg = csr_container(X_reg)
419
+ X_clf = csr_container(X_clf)
420
+
421
+ for X, y, est in ((X_reg, y_reg, reg), (X_clf, y_clf, clf)):
422
+ # It's okay to evaluate regression metrics on classification too
423
+ mse_scorer = check_scoring(est, scoring="neg_mean_squared_error")
424
+ r2_scorer = check_scoring(est, scoring="r2")
425
+ train_mse_scores = []
426
+ test_mse_scores = []
427
+ train_r2_scores = []
428
+ test_r2_scores = []
429
+ fitted_estimators = []
430
+
431
+ for train, test in cv.split(X, y):
432
+ est = clone(est).fit(X[train], y[train])
433
+ train_mse_scores.append(mse_scorer(est, X[train], y[train]))
434
+ train_r2_scores.append(r2_scorer(est, X[train], y[train]))
435
+ test_mse_scores.append(mse_scorer(est, X[test], y[test]))
436
+ test_r2_scores.append(r2_scorer(est, X[test], y[test]))
437
+ fitted_estimators.append(est)
438
+
439
+ train_mse_scores = np.array(train_mse_scores)
440
+ test_mse_scores = np.array(test_mse_scores)
441
+ train_r2_scores = np.array(train_r2_scores)
442
+ test_r2_scores = np.array(test_r2_scores)
443
+ fitted_estimators = np.array(fitted_estimators)
444
+
445
+ scores = (
446
+ train_mse_scores,
447
+ test_mse_scores,
448
+ train_r2_scores,
449
+ test_r2_scores,
450
+ fitted_estimators,
451
+ )
452
+
453
+ # To ensure that the test does not suffer from
454
+ # large statistical fluctuations due to slicing small datasets,
455
+ # we pass the cross-validation instance
456
+ check_cross_validate_single_metric(est, X, y, scores, cv)
457
+ check_cross_validate_multi_metric(est, X, y, scores, cv)
458
+
459
+
460
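+ # Editor's note (not upstream code): the loop above builds per-fold reference
+ # scores by hand; the two check_* helpers below compare them against the dict
+ # returned by cross_validate, whose keys are fit_time, score_time and
+ # test_score/train_score (or test_<metric>/train_<metric> for multimetric).
+
+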
+ def check_cross_validate_single_metric(clf, X, y, scores, cv):
+     (
+         train_mse_scores,
+         test_mse_scores,
+         train_r2_scores,
+         test_r2_scores,
+         fitted_estimators,
+     ) = scores
+     # Test single metric evaluation when scoring is a string or singleton list
+     for return_train_score, dict_len in ((True, 4), (False, 3)):
+         # Single metric passed as a string
+         if return_train_score:
+             mse_scores_dict = cross_validate(
+                 clf,
+                 X,
+                 y,
+                 scoring="neg_mean_squared_error",
+                 return_train_score=True,
+                 cv=cv,
+             )
+             assert_array_almost_equal(mse_scores_dict["train_score"], train_mse_scores)
+         else:
+             mse_scores_dict = cross_validate(
+                 clf,
+                 X,
+                 y,
+                 scoring="neg_mean_squared_error",
+                 return_train_score=False,
+                 cv=cv,
+             )
+         assert isinstance(mse_scores_dict, dict)
+         assert len(mse_scores_dict) == dict_len
+         assert_array_almost_equal(mse_scores_dict["test_score"], test_mse_scores)
+
+         # Single metric passed as a list
+         if return_train_score:
+             # return_train_score used to be True by default (deprecated
+             # behavior), so it is passed explicitly here
+             r2_scores_dict = cross_validate(
+                 clf, X, y, scoring=["r2"], return_train_score=True, cv=cv
+             )
+             assert_array_almost_equal(r2_scores_dict["train_r2"], train_r2_scores)
+         else:
+             r2_scores_dict = cross_validate(
+                 clf, X, y, scoring=["r2"], return_train_score=False, cv=cv
+             )
+         assert isinstance(r2_scores_dict, dict)
+         assert len(r2_scores_dict) == dict_len
+         assert_array_almost_equal(r2_scores_dict["test_r2"], test_r2_scores)
+
+     # Test the return_estimator option
+     mse_scores_dict = cross_validate(
+         clf, X, y, scoring="neg_mean_squared_error", return_estimator=True, cv=cv
+     )
+     for k, est in enumerate(mse_scores_dict["estimator"]):
+         est_coef = est.coef_.copy()
+         if issparse(est_coef):
+             est_coef = est_coef.toarray()
+
+         fitted_est_coef = fitted_estimators[k].coef_.copy()
+         if issparse(fitted_est_coef):
+             fitted_est_coef = fitted_est_coef.toarray()
+
+         assert_almost_equal(est_coef, fitted_est_coef)
+         assert_almost_equal(est.intercept_, fitted_estimators[k].intercept_)
+
+
+ def check_cross_validate_multi_metric(clf, X, y, scores, cv):
+     # Test multimetric evaluation when scoring is a list / dict
+     (
+         train_mse_scores,
+         test_mse_scores,
+         train_r2_scores,
+         test_r2_scores,
+         fitted_estimators,
+     ) = scores
+
+     def custom_scorer(clf, X, y):
+         y_pred = clf.predict(X)
+         return {
+             "r2": r2_score(y, y_pred),
+             "neg_mean_squared_error": -mean_squared_error(y, y_pred),
+         }
+
+     all_scoring = (
+         ("r2", "neg_mean_squared_error"),
+         {
+             "r2": make_scorer(r2_score),
+             "neg_mean_squared_error": "neg_mean_squared_error",
+         },
+         custom_scorer,
+     )
+
+     keys_sans_train = {
+         "test_r2",
+         "test_neg_mean_squared_error",
+         "fit_time",
+         "score_time",
+     }
+     keys_with_train = keys_sans_train.union(
+         {"train_r2", "train_neg_mean_squared_error"}
+     )
+
+     for return_train_score in (True, False):
+         for scoring in all_scoring:
+             if return_train_score:
+                 # return_train_score used to be True by default (deprecated)
+                 cv_results = cross_validate(
+                     clf, X, y, scoring=scoring, return_train_score=True, cv=cv
+                 )
+                 assert_array_almost_equal(cv_results["train_r2"], train_r2_scores)
+                 assert_array_almost_equal(
+                     cv_results["train_neg_mean_squared_error"], train_mse_scores
+                 )
+             else:
+                 cv_results = cross_validate(
+                     clf, X, y, scoring=scoring, return_train_score=False, cv=cv
+                 )
+             assert isinstance(cv_results, dict)
+             assert set(cv_results.keys()) == (
+                 keys_with_train if return_train_score else keys_sans_train
+             )
+             assert_array_almost_equal(cv_results["test_r2"], test_r2_scores)
+             assert_array_almost_equal(
+                 cv_results["test_neg_mean_squared_error"], test_mse_scores
+             )
+
+             # Make sure all the arrays are of np.ndarray type
+             assert type(cv_results["test_r2"]) == np.ndarray
+             assert type(cv_results["test_neg_mean_squared_error"]) == np.ndarray
+             assert type(cv_results["fit_time"]) == np.ndarray
+             assert type(cv_results["score_time"]) == np.ndarray
+
+             # Ensure all the times are within sane limits
+             assert np.all(cv_results["fit_time"] >= 0)
+             assert np.all(cv_results["fit_time"] < 10)
+             assert np.all(cv_results["score_time"] >= 0)
+             assert np.all(cv_results["score_time"] < 10)
+
+
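+ # Editor's sketch (not upstream code): the ``custom_scorer`` pattern above in
+ # user-facing form - cross_validate accepts a single callable that returns a
+ # dict of metric names to values, and expands each key into a test_<name>
+ # entry of the results dict.
+ def _sketch_dict_returning_scorer():
+     from sklearn.linear_model import Ridge
+     from sklearn.metrics import mean_squared_error, r2_score
+
+     X_demo, y_demo = make_regression(n_samples=30, random_state=0)
+
+     def scorer(est, X_eval, y_eval):
+         pred = est.predict(X_eval)
+         return {
+             "r2": r2_score(y_eval, pred),
+             "neg_mse": -mean_squared_error(y_eval, pred),
+         }
+
+     res = cross_validate(Ridge(), X_demo, y_demo, scoring=scorer)
+     return sorted(res)  # ['fit_time', 'score_time', 'test_neg_mse', 'test_r2']
+
+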
+ def test_cross_val_score_predict_groups():
+     # Check that a ValueError (when groups is None) propagates to
+     # cross_val_score and cross_val_predict,
+     # and also that groups is correctly passed to the cv object
+     X, y = make_classification(n_samples=20, n_classes=2, random_state=0)
+
+     clf = SVC(kernel="linear")
+
+     group_cvs = [
+         LeaveOneGroupOut(),
+         LeavePGroupsOut(2),
+         GroupKFold(),
+         GroupShuffleSplit(),
+     ]
+     error_message = "The 'groups' parameter should not be None."
+     for cv in group_cvs:
+         with pytest.raises(ValueError, match=error_message):
+             cross_val_score(estimator=clf, X=X, y=y, cv=cv)
+         with pytest.raises(ValueError, match=error_message):
+             cross_val_predict(estimator=clf, X=X, y=y, cv=cv)
+
+
+ @pytest.mark.filterwarnings("ignore: Using or importing the ABCs from")
622
+ def test_cross_val_score_pandas():
623
+ # check cross_val_score doesn't destroy pandas dataframe
624
+ types = [(MockDataFrame, MockDataFrame)]
625
+ try:
626
+ from pandas import DataFrame, Series
627
+
628
+ types.append((Series, DataFrame))
629
+ except ImportError:
630
+ pass
631
+ for TargetType, InputFeatureType in types:
632
+ # X dataframe, y series
633
+ # 3 fold cross val is used so we need at least 3 samples per class
634
+ X_df, y_ser = InputFeatureType(X), TargetType(y2)
635
+ check_df = lambda x: isinstance(x, InputFeatureType)
636
+ check_series = lambda x: isinstance(x, TargetType)
637
+ clf = CheckingClassifier(check_X=check_df, check_y=check_series)
638
+ cross_val_score(clf, X_df, y_ser, cv=3)
639
+
640
+
641
+ def test_cross_val_score_mask():
+     # test that cross_val_score works with boolean masks
+     svm = SVC(kernel="linear")
+     iris = load_iris()
+     X, y = iris.data, iris.target
+     kfold = KFold(5)
+     scores_indices = cross_val_score(svm, X, y, cv=kfold)
+     kfold = KFold(5)
+     cv_masks = []
+     for train, test in kfold.split(X, y):
+         mask_train = np.zeros(len(y), dtype=bool)
+         mask_test = np.zeros(len(y), dtype=bool)
+         mask_train[train] = 1
+         mask_test[test] = 1
+         # append the boolean masks (not the index arrays), since masks are
+         # exactly what this test is meant to exercise
+         cv_masks.append((mask_train, mask_test))
+     scores_masks = cross_val_score(svm, X, y, cv=cv_masks)
+     assert_array_equal(scores_indices, scores_masks)
+
+
+ def test_cross_val_score_precomputed():
+     # test svm with a precomputed kernel
+     svm = SVC(kernel="precomputed")
+     iris = load_iris()
+     X, y = iris.data, iris.target
+     linear_kernel = np.dot(X, X.T)
+     score_precomputed = cross_val_score(svm, linear_kernel, y)
+     svm = SVC(kernel="linear")
+     score_linear = cross_val_score(svm, X, y)
+     assert_array_almost_equal(score_precomputed, score_linear)
+
+     # test with a callable kernel
+     svm = SVC(kernel=lambda x, y: np.dot(x, y.T))
+     score_callable = cross_val_score(svm, X, y)
+     assert_array_almost_equal(score_precomputed, score_callable)
+
+     # Error raised for non-square X
+     svm = SVC(kernel="precomputed")
+     with pytest.raises(ValueError):
+         cross_val_score(svm, X, y)
+
+     # test that an error is raised when the precomputed kernel is not
+     # array-like or sparse
+     with pytest.raises(ValueError):
+         cross_val_score(svm, linear_kernel.tolist(), y)
+
+
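+ # Editor's sketch (not upstream code): what cross_val_score does internally
+ # for kernel="precomputed" - the Gram matrix must be sliced on both axes,
+ # K[train, train] for fitting and K[test, train] for scoring.
+ def _sketch_manual_precomputed_kernel_cv():
+     from sklearn.svm import SVC
+
+     X_demo, y_demo = load_iris(return_X_y=True)
+     K = np.dot(X_demo, X_demo.T)
+     scores = []
+     for tr, te in KFold(n_splits=5).split(X_demo):
+         clf = SVC(kernel="precomputed").fit(K[np.ix_(tr, tr)], y_demo[tr])
+         scores.append(clf.score(K[np.ix_(te, tr)], y_demo[te]))
+     return scores
+
+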
+ @pytest.mark.parametrize("coo_container", COO_CONTAINERS)
688
+ def test_cross_val_score_fit_params(coo_container):
689
+ clf = MockClassifier()
690
+ n_samples = X.shape[0]
691
+ n_classes = len(np.unique(y))
692
+
693
+ W_sparse = coo_container(
694
+ (np.array([1]), (np.array([1]), np.array([0]))), shape=(10, 1)
695
+ )
696
+ P_sparse = coo_container(np.eye(5))
697
+
698
+ DUMMY_INT = 42
699
+ DUMMY_STR = "42"
700
+ DUMMY_OBJ = object()
701
+
702
+ def assert_fit_params(clf):
703
+ # Function to test that the values are passed correctly to the
704
+ # classifier arguments for non-array type
705
+
706
+ assert clf.dummy_int == DUMMY_INT
707
+ assert clf.dummy_str == DUMMY_STR
708
+ assert clf.dummy_obj == DUMMY_OBJ
709
+
710
+ fit_params = {
711
+ "sample_weight": np.ones(n_samples),
712
+ "class_prior": np.full(n_classes, 1.0 / n_classes),
713
+ "sparse_sample_weight": W_sparse,
714
+ "sparse_param": P_sparse,
715
+ "dummy_int": DUMMY_INT,
716
+ "dummy_str": DUMMY_STR,
717
+ "dummy_obj": DUMMY_OBJ,
718
+ "callback": assert_fit_params,
719
+ }
720
+ cross_val_score(clf, X, y, params=fit_params)
721
+
722
+
723
+ def test_cross_val_score_score_func():
+     clf = MockClassifier()
+     _score_func_args = []
+
+     def score_func(y_test, y_predict):
+         _score_func_args.append((y_test, y_predict))
+         return 1.0
+
+     with warnings.catch_warnings(record=True):
+         scoring = make_scorer(score_func)
+         score = cross_val_score(clf, X, y, scoring=scoring, cv=3)
+     assert_array_equal(score, [1.0, 1.0, 1.0])
+     # Test that the score function is called only 3 times (for cv=3)
+     assert len(_score_func_args) == 3
+
+
+ def test_cross_val_score_with_score_func_classification():
+     iris = load_iris()
+     clf = SVC(kernel="linear")
+
+     # Default score (should be the accuracy score)
+     scores = cross_val_score(clf, iris.data, iris.target)
+     assert_array_almost_equal(scores, [0.97, 1.0, 0.97, 0.97, 1.0], 2)
+
+     # Correct classification score (aka. zero-one score) - should be the
+     # same as the default estimator score
+     zo_scores = cross_val_score(clf, iris.data, iris.target, scoring="accuracy")
+     assert_array_almost_equal(zo_scores, [0.97, 1.0, 0.97, 0.97, 1.0], 2)
+
+     # F1 score (classes are balanced, so f1_score should equal the zero-one
+     # score)
+     f1_scores = cross_val_score(clf, iris.data, iris.target, scoring="f1_weighted")
+     assert_array_almost_equal(f1_scores, [0.97, 1.0, 0.97, 0.97, 1.0], 2)
+
+
+ def test_cross_val_score_with_score_func_regression():
+     X, y = make_regression(n_samples=30, n_features=20, n_informative=5, random_state=0)
+     reg = Ridge()
+
+     # Default score of the Ridge regression estimator
+     scores = cross_val_score(reg, X, y)
+     assert_array_almost_equal(scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
+
+     # R2 score (aka. coefficient of determination) - should be the
+     # same as the default estimator score
+     r2_scores = cross_val_score(reg, X, y, scoring="r2")
+     assert_array_almost_equal(r2_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
+
+     # Mean squared error; this is a loss function, so "scores" are negative
+     neg_mse_scores = cross_val_score(reg, X, y, scoring="neg_mean_squared_error")
+     expected_neg_mse = np.array([-763.07, -553.16, -274.38, -273.26, -1681.99])
+     assert_array_almost_equal(neg_mse_scores, expected_neg_mse, 2)
+
+     # Explained variance
+     scoring = make_scorer(explained_variance_score)
+     ev_scores = cross_val_score(reg, X, y, scoring=scoring)
+     assert_array_almost_equal(ev_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
+
+
+ @pytest.mark.parametrize("coo_container", COO_CONTAINERS)
783
+ def test_permutation_score(coo_container):
784
+ iris = load_iris()
785
+ X = iris.data
786
+ X_sparse = coo_container(X)
787
+ y = iris.target
788
+ svm = SVC(kernel="linear")
789
+ cv = StratifiedKFold(2)
790
+
791
+ score, scores, pvalue = permutation_test_score(
792
+ svm, X, y, n_permutations=30, cv=cv, scoring="accuracy"
793
+ )
794
+ assert score > 0.9
795
+ assert_almost_equal(pvalue, 0.0, 1)
796
+
797
+ score_group, _, pvalue_group = permutation_test_score(
798
+ svm,
799
+ X,
800
+ y,
801
+ n_permutations=30,
802
+ cv=cv,
803
+ scoring="accuracy",
804
+ groups=np.ones(y.size),
805
+ random_state=0,
806
+ )
807
+ assert score_group == score
808
+ assert pvalue_group == pvalue
809
+
810
+ # check that we obtain the same results with a sparse representation
811
+ svm_sparse = SVC(kernel="linear")
812
+ cv_sparse = StratifiedKFold(2)
813
+ score_group, _, pvalue_group = permutation_test_score(
814
+ svm_sparse,
815
+ X_sparse,
816
+ y,
817
+ n_permutations=30,
818
+ cv=cv_sparse,
819
+ scoring="accuracy",
820
+ groups=np.ones(y.size),
821
+ random_state=0,
822
+ )
823
+
824
+ assert score_group == score
825
+ assert pvalue_group == pvalue
826
+
827
+ # test with custom scoring object
828
+ def custom_score(y_true, y_pred):
829
+ return ((y_true == y_pred).sum() - (y_true != y_pred).sum()) / y_true.shape[0]
830
+
831
+ scorer = make_scorer(custom_score)
832
+ score, _, pvalue = permutation_test_score(
833
+ svm, X, y, n_permutations=100, scoring=scorer, cv=cv, random_state=0
834
+ )
835
+ assert_almost_equal(score, 0.93, 2)
836
+ assert_almost_equal(pvalue, 0.01, 3)
837
+
838
+ # set random y
839
+ y = np.mod(np.arange(len(y)), 3)
840
+
841
+ score, scores, pvalue = permutation_test_score(
842
+ svm, X, y, n_permutations=30, cv=cv, scoring="accuracy"
843
+ )
844
+
845
+ assert score < 0.5
846
+ assert pvalue > 0.2
847
+
848
+
849
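+ # Editor's note (background, not upstream code): permutation_test_score
+ # reports the empirical p-value (C + 1) / (n_permutations + 1), where C is
+ # the number of permutations scoring at least as well as the original
+ # labels. A perfect separation with n_permutations=30 therefore gives
+ # pvalue = 1/31 ~ 0.032, which rounds to 0.0 at 1 decimal as asserted above.
+
+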
+ def test_permutation_test_score_allow_nans():
+     # Check that permutation_test_score allows input data with NaNs
+     X = np.arange(200, dtype=np.float64).reshape(10, -1)
+     X[2, :] = np.nan
+     y = np.repeat([0, 1], X.shape[0] / 2)
+     p = Pipeline(
+         [
+             ("imputer", SimpleImputer(strategy="mean", missing_values=np.nan)),
+             ("classifier", MockClassifier()),
+         ]
+     )
+     permutation_test_score(p, X, y)
+
+
+ def test_permutation_test_score_fit_params():
+     X = np.arange(100).reshape(10, 10)
+     y = np.array([0] * 5 + [1] * 5)
+     clf = CheckingClassifier(expected_sample_weight=True)
+
+     err_msg = r"Expected sample_weight to be passed"
+     with pytest.raises(AssertionError, match=err_msg):
+         permutation_test_score(clf, X, y)
+
+     err_msg = r"sample_weight.shape == \(1,\), expected \(8,\)!"
+     with pytest.raises(ValueError, match=err_msg):
+         permutation_test_score(clf, X, y, fit_params={"sample_weight": np.ones(1)})
+     permutation_test_score(clf, X, y, fit_params={"sample_weight": np.ones(10)})
+
+
+ def test_cross_val_score_allow_nans():
+     # Check that cross_val_score allows input data with NaNs
+     X = np.arange(200, dtype=np.float64).reshape(10, -1)
+     X[2, :] = np.nan
+     y = np.repeat([0, 1], X.shape[0] / 2)
+     p = Pipeline(
+         [
+             ("imputer", SimpleImputer(strategy="mean", missing_values=np.nan)),
+             ("classifier", MockClassifier()),
+         ]
+     )
+     cross_val_score(p, X, y)
+
+
+ def test_cross_val_score_multilabel():
+     X = np.array(
+         [
+             [-3, 4],
+             [2, 4],
+             [3, 3],
+             [0, 2],
+             [-3, 1],
+             [-2, 1],
+             [0, 0],
+             [-2, -1],
+             [-1, -2],
+             [1, -2],
+         ]
+     )
+     y = np.array(
+         [[1, 1], [0, 1], [0, 1], [0, 1], [1, 1], [0, 1], [1, 0], [1, 1], [1, 0], [0, 0]]
+     )
+     clf = KNeighborsClassifier(n_neighbors=1)
+     scoring_micro = make_scorer(precision_score, average="micro")
+     scoring_macro = make_scorer(precision_score, average="macro")
+     scoring_samples = make_scorer(precision_score, average="samples")
+     score_micro = cross_val_score(clf, X, y, scoring=scoring_micro)
+     score_macro = cross_val_score(clf, X, y, scoring=scoring_macro)
+     score_samples = cross_val_score(clf, X, y, scoring=scoring_samples)
+     assert_almost_equal(score_micro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 3])
+     assert_almost_equal(score_macro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
+     assert_almost_equal(score_samples, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
+
+
+ @pytest.mark.parametrize("coo_container", COO_CONTAINERS)
923
+ def test_cross_val_predict(coo_container):
924
+ X, y = load_diabetes(return_X_y=True)
925
+ cv = KFold()
926
+
927
+ est = Ridge()
928
+
929
+ # Naive loop (should be same as cross_val_predict):
930
+ preds2 = np.zeros_like(y)
931
+ for train, test in cv.split(X, y):
932
+ est.fit(X[train], y[train])
933
+ preds2[test] = est.predict(X[test])
934
+
935
+ preds = cross_val_predict(est, X, y, cv=cv)
936
+ assert_array_almost_equal(preds, preds2)
937
+
938
+ preds = cross_val_predict(est, X, y)
939
+ assert len(preds) == len(y)
940
+
941
+ cv = LeaveOneOut()
942
+ preds = cross_val_predict(est, X, y, cv=cv)
943
+ assert len(preds) == len(y)
944
+
945
+ Xsp = X.copy()
946
+ Xsp *= Xsp > np.median(Xsp)
947
+ Xsp = coo_container(Xsp)
948
+ preds = cross_val_predict(est, Xsp, y)
949
+ assert_array_almost_equal(len(preds), len(y))
950
+
951
+ preds = cross_val_predict(KMeans(n_init="auto"), X)
952
+ assert len(preds) == len(y)
953
+
954
+ class BadCV:
955
+ def split(self, X, y=None, groups=None):
956
+ for i in range(4):
957
+ yield np.array([0, 1, 2, 3]), np.array([4, 5, 6, 7, 8])
958
+
959
+ with pytest.raises(ValueError):
960
+ cross_val_predict(est, X, y, cv=BadCV())
961
+
962
+ X, y = load_iris(return_X_y=True)
963
+
964
+ warning_message = (
965
+ r"Number of classes in training fold \(2\) does "
966
+ r"not match total number of classes \(3\). "
967
+ "Results may not be appropriate for your use case."
968
+ )
969
+ with pytest.warns(RuntimeWarning, match=warning_message):
970
+ cross_val_predict(
971
+ LogisticRegression(solver="liblinear"),
972
+ X,
973
+ y,
974
+ method="predict_proba",
975
+ cv=KFold(2),
976
+ )
977
+
978
+
979
+ def test_cross_val_predict_decision_function_shape():
+     X, y = make_classification(n_classes=2, n_samples=50, random_state=0)
+
+     preds = cross_val_predict(
+         LogisticRegression(solver="liblinear"), X, y, method="decision_function"
+     )
+     assert preds.shape == (50,)
+
+     X, y = load_iris(return_X_y=True)
+
+     preds = cross_val_predict(
+         LogisticRegression(solver="liblinear"), X, y, method="decision_function"
+     )
+     assert preds.shape == (150, 3)
+
+     # This specifically tests imbalanced splits for binary
+     # classification with decision_function. This is only
+     # applicable to classifiers that can be fit on a single
+     # class.
+     X = X[:100]
+     y = y[:100]
+     error_message = (
+         "Only 1 class/es in training fold,"
+         " but 2 in overall dataset. This"
+         " is not supported for decision_function"
+         " with imbalanced folds. To fix "
+         "this, use a cross-validation technique "
+         "resulting in properly stratified folds"
+     )
+     with pytest.raises(ValueError, match=error_message):
+         cross_val_predict(
+             RidgeClassifier(), X, y, method="decision_function", cv=KFold(2)
+         )
+
+     X, y = load_digits(return_X_y=True)
+     est = SVC(kernel="linear", decision_function_shape="ovo")
+
+     preds = cross_val_predict(est, X, y, method="decision_function")
+     # ovo yields one column per class pair: 10 * 9 / 2 = 45 for the digits
+     assert preds.shape == (1797, 45)
+
+     ind = np.argsort(y)
+     X, y = X[ind], y[ind]
+     error_message_regexp = (
+         r"Output shape \(599L?, 21L?\) of "
+         "decision_function does not match number of "
+         r"classes \(7\) in fold. Irregular "
+         "decision_function .*"
+     )
+     with pytest.raises(ValueError, match=error_message_regexp):
+         cross_val_predict(est, X, y, cv=KFold(n_splits=3), method="decision_function")
+
+
+ def test_cross_val_predict_predict_proba_shape():
+     X, y = make_classification(n_classes=2, n_samples=50, random_state=0)
+
+     preds = cross_val_predict(
+         LogisticRegression(solver="liblinear"), X, y, method="predict_proba"
+     )
+     assert preds.shape == (50, 2)
+
+     X, y = load_iris(return_X_y=True)
+
+     preds = cross_val_predict(
+         LogisticRegression(solver="liblinear"), X, y, method="predict_proba"
+     )
+     assert preds.shape == (150, 3)
+
+
+ def test_cross_val_predict_predict_log_proba_shape():
+     X, y = make_classification(n_classes=2, n_samples=50, random_state=0)
+
+     preds = cross_val_predict(
+         LogisticRegression(solver="liblinear"), X, y, method="predict_log_proba"
+     )
+     assert preds.shape == (50, 2)
+
+     X, y = load_iris(return_X_y=True)
+
+     preds = cross_val_predict(
+         LogisticRegression(solver="liblinear"), X, y, method="predict_log_proba"
+     )
+     assert preds.shape == (150, 3)
+
+
+ @pytest.mark.parametrize("coo_container", COO_CONTAINERS)
1064
+ def test_cross_val_predict_input_types(coo_container):
1065
+ iris = load_iris()
1066
+ X, y = iris.data, iris.target
1067
+ X_sparse = coo_container(X)
1068
+ multioutput_y = np.column_stack([y, y[::-1]])
1069
+
1070
+ clf = Ridge(fit_intercept=False, random_state=0)
1071
+ # 3 fold cv is used --> at least 3 samples per class
1072
+ # Smoke test
1073
+ predictions = cross_val_predict(clf, X, y)
1074
+ assert predictions.shape == (150,)
1075
+
1076
+ # test with multioutput y
1077
+ predictions = cross_val_predict(clf, X_sparse, multioutput_y)
1078
+ assert predictions.shape == (150, 2)
1079
+
1080
+ predictions = cross_val_predict(clf, X_sparse, y)
1081
+ assert_array_equal(predictions.shape, (150,))
1082
+
1083
+ # test with multioutput y
1084
+ predictions = cross_val_predict(clf, X_sparse, multioutput_y)
1085
+ assert_array_equal(predictions.shape, (150, 2))
1086
+
1087
+ # test with X and y as list
1088
+ list_check = lambda x: isinstance(x, list)
1089
+ clf = CheckingClassifier(check_X=list_check)
1090
+ predictions = cross_val_predict(clf, X.tolist(), y.tolist())
1091
+
1092
+ clf = CheckingClassifier(check_y=list_check)
1093
+ predictions = cross_val_predict(clf, X, y.tolist())
1094
+
1095
+ # test with X and y as list and non empty method
1096
+ predictions = cross_val_predict(
1097
+ LogisticRegression(solver="liblinear"),
1098
+ X.tolist(),
1099
+ y.tolist(),
1100
+ method="decision_function",
1101
+ )
1102
+ predictions = cross_val_predict(
1103
+ LogisticRegression(solver="liblinear"),
1104
+ X,
1105
+ y.tolist(),
1106
+ method="decision_function",
1107
+ )
1108
+
1109
+ # test with 3d X and
1110
+ X_3d = X[:, :, np.newaxis]
1111
+ check_3d = lambda x: x.ndim == 3
1112
+ clf = CheckingClassifier(check_X=check_3d)
1113
+ predictions = cross_val_predict(clf, X_3d, y)
1114
+ assert_array_equal(predictions.shape, (150,))
1115
+
1116
+
1117
+ @pytest.mark.filterwarnings("ignore: Using or importing the ABCs from")
1118
+ # python3.7 deprecation warnings in pandas via matplotlib :-/
1119
+ def test_cross_val_predict_pandas():
1120
+ # check cross_val_score doesn't destroy pandas dataframe
1121
+ types = [(MockDataFrame, MockDataFrame)]
1122
+ try:
1123
+ from pandas import DataFrame, Series
1124
+
1125
+ types.append((Series, DataFrame))
1126
+ except ImportError:
1127
+ pass
1128
+ for TargetType, InputFeatureType in types:
1129
+ # X dataframe, y series
1130
+ X_df, y_ser = InputFeatureType(X), TargetType(y2)
1131
+ check_df = lambda x: isinstance(x, InputFeatureType)
1132
+ check_series = lambda x: isinstance(x, TargetType)
1133
+ clf = CheckingClassifier(check_X=check_df, check_y=check_series)
1134
+ cross_val_predict(clf, X_df, y_ser, cv=3)
1135
+
1136
+
1137
+ def test_cross_val_predict_unbalanced():
+     X, y = make_classification(
+         n_samples=100,
+         n_features=2,
+         n_redundant=0,
+         n_informative=2,
+         n_clusters_per_class=1,
+         random_state=1,
+     )
+     # Change the first sample to a new class
+     y[0] = 2
+     clf = LogisticRegression(random_state=1, solver="liblinear")
+     cv = StratifiedKFold(n_splits=2)
+     train, test = list(cv.split(X, y))
+     yhat_proba = cross_val_predict(clf, X, y, cv=cv, method="predict_proba")
+     assert y[test[0]][0] == 2  # sanity check for further assertions
+     assert np.all(yhat_proba[test[0]][:, 2] == 0)
+     assert np.all(yhat_proba[test[0]][:, 0:1] > 0)
+     assert np.all(yhat_proba[test[1]] > 0)
+     assert_array_almost_equal(yhat_proba.sum(axis=1), np.ones(y.shape), decimal=12)
+
+
+ def test_cross_val_predict_y_none():
+     # ensure that cross_val_predict works when y is None
+     mock_classifier = MockClassifier()
+     rng = np.random.RandomState(42)
+     X = rng.rand(100, 10)
+     y_hat = cross_val_predict(mock_classifier, X, y=None, cv=5, method="predict")
+     assert_allclose(X[:, 0], y_hat)
+     y_hat_proba = cross_val_predict(
+         mock_classifier, X, y=None, cv=5, method="predict_proba"
+     )
+     assert_allclose(X, y_hat_proba)
+
+
+ @pytest.mark.parametrize("coo_container", COO_CONTAINERS)
1173
+ def test_cross_val_score_sparse_fit_params(coo_container):
1174
+ iris = load_iris()
1175
+ X, y = iris.data, iris.target
1176
+ clf = MockClassifier()
1177
+ fit_params = {"sparse_sample_weight": coo_container(np.eye(X.shape[0]))}
1178
+ a = cross_val_score(clf, X, y, params=fit_params, cv=3)
1179
+ assert_array_equal(a, np.ones(3))
1180
+
1181
+
1182
+ def test_learning_curve():
+     n_samples = 30
+     n_splits = 3
+     X, y = make_classification(
+         n_samples=n_samples,
+         n_features=1,
+         n_informative=1,
+         n_redundant=0,
+         n_classes=2,
+         n_clusters_per_class=1,
+         random_state=0,
+     )
+     estimator = MockImprovingEstimator(n_samples * ((n_splits - 1) / n_splits))
+     for shuffle_train in [False, True]:
+         with warnings.catch_warnings(record=True) as w:
+             (
+                 train_sizes,
+                 train_scores,
+                 test_scores,
+                 fit_times,
+                 score_times,
+             ) = learning_curve(
+                 estimator,
+                 X,
+                 y,
+                 cv=KFold(n_splits=n_splits),
+                 train_sizes=np.linspace(0.1, 1.0, 10),
+                 shuffle=shuffle_train,
+                 return_times=True,
+             )
+         if len(w) > 0:
+             raise RuntimeError("Unexpected warning: %r" % w[0].message)
+         assert train_scores.shape == (10, 3)
+         assert test_scores.shape == (10, 3)
+         assert fit_times.shape == (10, 3)
+         assert score_times.shape == (10, 3)
+         assert_array_equal(train_sizes, np.linspace(2, 20, 10))
+         assert_array_almost_equal(train_scores.mean(axis=1), np.linspace(1.9, 1.0, 10))
+         assert_array_almost_equal(test_scores.mean(axis=1), np.linspace(0.1, 1.0, 10))
+
+         # Cannot use assert_array_almost_equal for fit and score times because
+         # the values are hardware-dependent
+         assert fit_times.dtype == "float64"
+         assert score_times.dtype == "float64"
+
+         # Test a custom cv splitter that can iterate only once
+         with warnings.catch_warnings(record=True) as w:
+             train_sizes2, train_scores2, test_scores2 = learning_curve(
+                 estimator,
+                 X,
+                 y,
+                 cv=OneTimeSplitter(n_splits=n_splits, n_samples=n_samples),
+                 train_sizes=np.linspace(0.1, 1.0, 10),
+                 shuffle=shuffle_train,
+             )
+         if len(w) > 0:
+             raise RuntimeError("Unexpected warning: %r" % w[0].message)
+         assert_array_almost_equal(train_scores2, train_scores)
+         assert_array_almost_equal(test_scores2, test_scores)
+
+
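+ # Editor's note (not upstream code): with 30 samples and 3-fold CV each
+ # training fold holds 20 samples, so the fractional train_sizes
+ # np.linspace(0.1, 1.0, 10) resolve to the absolute sizes 2, 4, ..., 20
+ # checked against np.linspace(2, 20, 10) above.
+
+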
+ def test_learning_curve_unsupervised():
+     X, _ = make_classification(
+         n_samples=30,
+         n_features=1,
+         n_informative=1,
+         n_redundant=0,
+         n_classes=2,
+         n_clusters_per_class=1,
+         random_state=0,
+     )
+     estimator = MockImprovingEstimator(20)
+     train_sizes, train_scores, test_scores = learning_curve(
+         estimator, X, y=None, cv=3, train_sizes=np.linspace(0.1, 1.0, 10)
+     )
+     assert_array_equal(train_sizes, np.linspace(2, 20, 10))
+     assert_array_almost_equal(train_scores.mean(axis=1), np.linspace(1.9, 1.0, 10))
+     assert_array_almost_equal(test_scores.mean(axis=1), np.linspace(0.1, 1.0, 10))
+
+
+ def test_learning_curve_verbose():
+     X, y = make_classification(
+         n_samples=30,
+         n_features=1,
+         n_informative=1,
+         n_redundant=0,
+         n_classes=2,
+         n_clusters_per_class=1,
+         random_state=0,
+     )
+     estimator = MockImprovingEstimator(20)
+
+     old_stdout = sys.stdout
+     sys.stdout = StringIO()
+     try:
+         train_sizes, train_scores, test_scores = learning_curve(
+             estimator, X, y, cv=3, verbose=1
+         )
+     finally:
+         out = sys.stdout.getvalue()
+         sys.stdout.close()
+         sys.stdout = old_stdout
+
+     assert "[learning_curve]" in out
+
+
+ def test_learning_curve_incremental_learning_not_possible():
+     X, y = make_classification(
+         n_samples=2,
+         n_features=1,
+         n_informative=1,
+         n_redundant=0,
+         n_classes=2,
+         n_clusters_per_class=1,
+         random_state=0,
+     )
+     # The mockup does not have partial_fit()
+     estimator = MockImprovingEstimator(1)
+     with pytest.raises(ValueError):
+         learning_curve(estimator, X, y, exploit_incremental_learning=True)
+
+
+ def test_learning_curve_incremental_learning():
+     X, y = make_classification(
+         n_samples=30,
+         n_features=1,
+         n_informative=1,
+         n_redundant=0,
+         n_classes=2,
+         n_clusters_per_class=1,
+         random_state=0,
+     )
+     estimator = MockIncrementalImprovingEstimator(20)
+     for shuffle_train in [False, True]:
+         train_sizes, train_scores, test_scores = learning_curve(
+             estimator,
+             X,
+             y,
+             cv=3,
+             exploit_incremental_learning=True,
+             train_sizes=np.linspace(0.1, 1.0, 10),
+             shuffle=shuffle_train,
+         )
+         assert_array_equal(train_sizes, np.linspace(2, 20, 10))
+         assert_array_almost_equal(train_scores.mean(axis=1), np.linspace(1.9, 1.0, 10))
+         assert_array_almost_equal(test_scores.mean(axis=1), np.linspace(0.1, 1.0, 10))
+
+
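+ # Editor's note (not upstream code): exploit_incremental_learning=True trains
+ # one estimator per CV split via partial_fit on successive chunks of the
+ # training fold instead of refitting from scratch at each train size; the
+ # equivalence with batch refitting is asserted further below in
+ # test_learning_curve_batch_and_incremental_learning_are_equal.
+
+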
+ def test_learning_curve_incremental_learning_unsupervised():
+     X, _ = make_classification(
+         n_samples=30,
+         n_features=1,
+         n_informative=1,
+         n_redundant=0,
+         n_classes=2,
+         n_clusters_per_class=1,
+         random_state=0,
+     )
+     estimator = MockIncrementalImprovingEstimator(20)
+     train_sizes, train_scores, test_scores = learning_curve(
+         estimator,
+         X,
+         y=None,
+         cv=3,
+         exploit_incremental_learning=True,
+         train_sizes=np.linspace(0.1, 1.0, 10),
+     )
+     assert_array_equal(train_sizes, np.linspace(2, 20, 10))
+     assert_array_almost_equal(train_scores.mean(axis=1), np.linspace(1.9, 1.0, 10))
+     assert_array_almost_equal(test_scores.mean(axis=1), np.linspace(0.1, 1.0, 10))
+
+
+ def test_learning_curve_batch_and_incremental_learning_are_equal():
+     X, y = make_classification(
+         n_samples=30,
+         n_features=1,
+         n_informative=1,
+         n_redundant=0,
+         n_classes=2,
+         n_clusters_per_class=1,
+         random_state=0,
+     )
+     train_sizes = np.linspace(0.2, 1.0, 5)
+     estimator = PassiveAggressiveClassifier(max_iter=1, tol=None, shuffle=False)
+
+     train_sizes_inc, train_scores_inc, test_scores_inc = learning_curve(
+         estimator,
+         X,
+         y,
+         train_sizes=train_sizes,
+         cv=3,
+         exploit_incremental_learning=True,
+     )
+     train_sizes_batch, train_scores_batch, test_scores_batch = learning_curve(
+         estimator,
+         X,
+         y,
+         cv=3,
+         train_sizes=train_sizes,
+         exploit_incremental_learning=False,
+     )
+
+     assert_array_equal(train_sizes_inc, train_sizes_batch)
+     assert_array_almost_equal(
+         train_scores_inc.mean(axis=1), train_scores_batch.mean(axis=1)
+     )
+     assert_array_almost_equal(
+         test_scores_inc.mean(axis=1), test_scores_batch.mean(axis=1)
+     )
+
+
+
1393
+ def test_learning_curve_n_sample_range_out_of_bounds():
1394
+ X, y = make_classification(
1395
+ n_samples=30,
1396
+ n_features=1,
1397
+ n_informative=1,
1398
+ n_redundant=0,
1399
+ n_classes=2,
1400
+ n_clusters_per_class=1,
1401
+ random_state=0,
1402
+ )
1403
+ estimator = MockImprovingEstimator(20)
1404
+ with pytest.raises(ValueError):
1405
+ learning_curve(estimator, X, y, cv=3, train_sizes=[0, 1])
1406
+ with pytest.raises(ValueError):
1407
+ learning_curve(estimator, X, y, cv=3, train_sizes=[0.0, 1.0])
1408
+ with pytest.raises(ValueError):
1409
+ learning_curve(estimator, X, y, cv=3, train_sizes=[0.1, 1.1])
1410
+ with pytest.raises(ValueError):
1411
+ learning_curve(estimator, X, y, cv=3, train_sizes=[0, 20])
1412
+ with pytest.raises(ValueError):
1413
+ learning_curve(estimator, X, y, cv=3, train_sizes=[1, 21])
1414
+
1415
+
1416
+ def test_learning_curve_remove_duplicate_sample_sizes():
+     X, y = make_classification(
+         n_samples=3,
+         n_features=1,
+         n_informative=1,
+         n_redundant=0,
+         n_classes=2,
+         n_clusters_per_class=1,
+         random_state=0,
+     )
+     estimator = MockImprovingEstimator(2)
+     warning_message = (
+         "Removed duplicate entries from 'train_sizes'. Number of ticks "
+         "will be less than the size of 'train_sizes': 2 instead of 3."
+     )
+     with pytest.warns(RuntimeWarning, match=warning_message):
+         train_sizes, _, _ = learning_curve(
+             estimator, X, y, cv=3, train_sizes=np.linspace(0.33, 1.0, 3)
+         )
+     assert_array_equal(train_sizes, [1, 2])
+
+
+ def test_learning_curve_with_boolean_indices():
+     X, y = make_classification(
+         n_samples=30,
+         n_features=1,
+         n_informative=1,
+         n_redundant=0,
+         n_classes=2,
+         n_clusters_per_class=1,
+         random_state=0,
+     )
+     estimator = MockImprovingEstimator(20)
+     cv = KFold(n_splits=3)
+     train_sizes, train_scores, test_scores = learning_curve(
+         estimator, X, y, cv=cv, train_sizes=np.linspace(0.1, 1.0, 10)
+     )
+     assert_array_equal(train_sizes, np.linspace(2, 20, 10))
+     assert_array_almost_equal(train_scores.mean(axis=1), np.linspace(1.9, 1.0, 10))
+     assert_array_almost_equal(test_scores.mean(axis=1), np.linspace(0.1, 1.0, 10))
+
+
+ def test_learning_curve_with_shuffle():
+     # The following test case was designed to verify the code changes made
+     # in pull request #7506.
+     X = np.array(
+         [
+             [1, 2],
+             [3, 4],
+             [5, 6],
+             [7, 8],
+             [11, 12],
+             [13, 14],
+             [15, 16],
+             [17, 18],
+             [19, 20],
+             [7, 8],
+             [9, 10],
+             [11, 12],
+             [13, 14],
+             [15, 16],
+             [17, 18],
+         ]
+     )
+     y = np.array([1, 1, 1, 2, 3, 4, 1, 1, 2, 3, 4, 1, 2, 3, 4])
+     groups = np.array([1, 1, 1, 1, 1, 1, 3, 3, 3, 3, 3, 4, 4, 4, 4])
+     # Splits on these groups fail without shuffle as the first iteration
+     # of the learning curve doesn't contain label 4 in the training set.
+     estimator = PassiveAggressiveClassifier(max_iter=5, tol=None, shuffle=False)
+
+     cv = GroupKFold(n_splits=2)
+     train_sizes_batch, train_scores_batch, test_scores_batch = learning_curve(
+         estimator,
+         X,
+         y,
+         cv=cv,
+         n_jobs=1,
+         train_sizes=np.linspace(0.3, 1.0, 3),
+         groups=groups,
+         shuffle=True,
+         random_state=2,
+     )
+     assert_array_almost_equal(
+         train_scores_batch.mean(axis=1), np.array([0.75, 0.3, 0.36111111])
+     )
+     assert_array_almost_equal(
+         test_scores_batch.mean(axis=1), np.array([0.36111111, 0.25, 0.25])
+     )
+     with pytest.raises(ValueError):
+         learning_curve(
+             estimator,
+             X,
+             y,
+             cv=cv,
+             n_jobs=1,
+             train_sizes=np.linspace(0.3, 1.0, 3),
+             groups=groups,
+             error_score="raise",
+         )
+
+     train_sizes_inc, train_scores_inc, test_scores_inc = learning_curve(
+         estimator,
+         X,
+         y,
+         cv=cv,
+         n_jobs=1,
+         train_sizes=np.linspace(0.3, 1.0, 3),
+         groups=groups,
+         shuffle=True,
+         random_state=2,
+         exploit_incremental_learning=True,
+     )
+     assert_array_almost_equal(
+         train_scores_inc.mean(axis=1), train_scores_batch.mean(axis=1)
+     )
+     assert_array_almost_equal(
+         test_scores_inc.mean(axis=1), test_scores_batch.mean(axis=1)
+     )
+
+
+ def test_learning_curve_fit_params():
+     X = np.arange(100).reshape(10, 10)
+     y = np.array([0] * 5 + [1] * 5)
+     clf = CheckingClassifier(expected_sample_weight=True)
+
+     err_msg = r"Expected sample_weight to be passed"
+     with pytest.raises(AssertionError, match=err_msg):
+         learning_curve(clf, X, y, error_score="raise")
+
+     err_msg = r"sample_weight.shape == \(1,\), expected \(2,\)!"
+     with pytest.raises(ValueError, match=err_msg):
+         learning_curve(
+             clf, X, y, error_score="raise", fit_params={"sample_weight": np.ones(1)}
+         )
+     learning_curve(
+         clf, X, y, error_score="raise", fit_params={"sample_weight": np.ones(10)}
+     )
+
+
+ def test_learning_curve_incremental_learning_fit_params():
+     X, y = make_classification(
+         n_samples=30,
+         n_features=1,
+         n_informative=1,
+         n_redundant=0,
+         n_classes=2,
+         n_clusters_per_class=1,
+         random_state=0,
+     )
+     estimator = MockIncrementalImprovingEstimator(20, ["sample_weight"])
+     err_msg = r"Expected fit parameter\(s\) \['sample_weight'\] not seen."
+     with pytest.raises(AssertionError, match=err_msg):
+         learning_curve(
+             estimator,
+             X,
+             y,
+             cv=3,
+             exploit_incremental_learning=True,
+             train_sizes=np.linspace(0.1, 1.0, 10),
+             error_score="raise",
+         )
+
+     err_msg = "Fit parameter sample_weight has length 3; expected"
+     with pytest.raises(AssertionError, match=err_msg):
+         learning_curve(
+             estimator,
+             X,
+             y,
+             cv=3,
+             exploit_incremental_learning=True,
+             train_sizes=np.linspace(0.1, 1.0, 10),
+             error_score="raise",
+             fit_params={"sample_weight": np.ones(3)},
+         )
+
+     learning_curve(
+         estimator,
+         X,
+         y,
+         cv=3,
+         exploit_incremental_learning=True,
+         train_sizes=np.linspace(0.1, 1.0, 10),
+         error_score="raise",
+         fit_params={"sample_weight": np.ones(2)},
+     )
+
+
+ def test_validation_curve():
+     X, y = make_classification(
+         n_samples=2,
+         n_features=1,
+         n_informative=1,
+         n_redundant=0,
+         n_classes=2,
+         n_clusters_per_class=1,
+         random_state=0,
+     )
+     param_range = np.linspace(0, 1, 10)
+     with warnings.catch_warnings(record=True) as w:
+         train_scores, test_scores = validation_curve(
+             MockEstimatorWithParameter(),
+             X,
+             y,
+             param_name="param",
+             param_range=param_range,
+             cv=2,
+         )
+     if len(w) > 0:
+         raise RuntimeError("Unexpected warning: %r" % w[0].message)
+
+     assert_array_almost_equal(train_scores.mean(axis=1), param_range)
+     assert_array_almost_equal(test_scores.mean(axis=1), 1 - param_range)
+
+
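+ # Editor's note (not upstream code): the assertions above follow directly
+ # from MockEstimatorWithParameter, which scores ``param`` on its training
+ # data and ``1 - param`` elsewhere, so the mean train/test curves recover
+ # the parameter grid exactly.
+
+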
+ def test_validation_curve_clone_estimator():
+     X, y = make_classification(
+         n_samples=2,
+         n_features=1,
+         n_informative=1,
+         n_redundant=0,
+         n_classes=2,
+         n_clusters_per_class=1,
+         random_state=0,
+     )
+
+     param_range = np.linspace(1, 0, 10)
+     _, _ = validation_curve(
+         MockEstimatorWithSingleFitCallAllowed(),
+         X,
+         y,
+         param_name="param",
+         param_range=param_range,
+         cv=2,
+     )
+
+
+ def test_validation_curve_cv_splits_consistency():
+     n_samples = 100
+     n_splits = 5
+     X, y = make_classification(n_samples=n_samples, random_state=0)
+
+     scores1 = validation_curve(
+         SVC(kernel="linear", random_state=0),
+         X,
+         y,
+         param_name="C",
+         param_range=[0.1, 0.1, 0.2, 0.2],
+         cv=OneTimeSplitter(n_splits=n_splits, n_samples=n_samples),
+     )
+     # The OneTimeSplitter is a non-reentrant cv splitter. Unless `split` is
+     # called once per parameter setting, the following should produce
+     # identical results for param setting 1 and param setting 2, as both
+     # share the same C value. (np.hstack stacks the train and test scores
+     # into one (4, 2 * n_splits) array; reordering the rows as (0, 2, 1, 3)
+     # and splitting in two compares param 0 with param 1 and param 2 with
+     # param 3.)
+     assert_array_almost_equal(*np.vsplit(np.hstack(scores1)[(0, 2, 1, 3), :], 2))
+
+     scores2 = validation_curve(
+         SVC(kernel="linear", random_state=0),
+         X,
+         y,
+         param_name="C",
+         param_range=[0.1, 0.1, 0.2, 0.2],
+         cv=KFold(n_splits=n_splits, shuffle=True),
+     )
+
+     # For scores2, compare the scores of the 1st and 2nd parameter settings
+     # (since the C value of the first two settings is 0.1, they must be
+     # consistent unless the train/test folds differ between the settings)
+     assert_array_almost_equal(*np.vsplit(np.hstack(scores2)[(0, 2, 1, 3), :], 2))
+
+     scores3 = validation_curve(
+         SVC(kernel="linear", random_state=0),
+         X,
+         y,
+         param_name="C",
+         param_range=[0.1, 0.1, 0.2, 0.2],
+         cv=KFold(n_splits=n_splits),
+     )
+
+     # OneTimeSplitter is basically an unshuffled KFold(n_splits=5). Sanity check.
+     assert_array_almost_equal(np.array(scores3), np.array(scores1))
+
+
+ def test_validation_curve_fit_params():
+     X = np.arange(100).reshape(10, 10)
+     y = np.array([0] * 5 + [1] * 5)
+     clf = CheckingClassifier(expected_sample_weight=True)
+
+     err_msg = r"Expected sample_weight to be passed"
+     with pytest.raises(AssertionError, match=err_msg):
+         validation_curve(
+             clf,
+             X,
+             y,
+             param_name="foo_param",
+             param_range=[1, 2, 3],
+             error_score="raise",
+         )
+
+     err_msg = r"sample_weight.shape == \(1,\), expected \(8,\)!"
+     with pytest.raises(ValueError, match=err_msg):
+         validation_curve(
+             clf,
+             X,
+             y,
+             param_name="foo_param",
+             param_range=[1, 2, 3],
+             error_score="raise",
+             fit_params={"sample_weight": np.ones(1)},
+         )
+     validation_curve(
+         clf,
+         X,
+         y,
+         param_name="foo_param",
+         param_range=[1, 2, 3],
+         error_score="raise",
+         fit_params={"sample_weight": np.ones(10)},
+     )
+
+
+ def test_check_is_permutation():
+     rng = np.random.RandomState(0)
+     p = np.arange(100)
+     rng.shuffle(p)
+     assert _check_is_permutation(p, 100)
+     assert not _check_is_permutation(np.delete(p, 23), 100)
+
+     p[0] = 23
+     assert not _check_is_permutation(p, 100)
+
+     # Check that additional duplicate indices are caught
+     assert not _check_is_permutation(np.hstack((p, 0)), 100)
+
+
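+ # Editor's sketch (an assumption about the contract of the private helper
+ # _check_is_permutation, not its actual implementation): an equivalent check
+ # requires every index in [0, n_samples) to occur exactly once.
+ def _sketch_is_permutation(indices, n_samples):
+     indices = np.asarray(indices)
+     if len(indices) != n_samples:
+         return False
+     hit = np.zeros(n_samples, dtype=bool)
+     hit[indices] = True
+     return bool(hit.all())
+
+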
+ @pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
1751
+ def test_cross_val_predict_sparse_prediction(csr_container):
1752
+ # check that cross_val_predict gives same result for sparse and dense input
1753
+ X, y = make_multilabel_classification(
1754
+ n_classes=2,
1755
+ n_labels=1,
1756
+ allow_unlabeled=False,
1757
+ return_indicator=True,
1758
+ random_state=1,
1759
+ )
1760
+ X_sparse = csr_container(X)
1761
+ y_sparse = csr_container(y)
1762
+ classif = OneVsRestClassifier(SVC(kernel="linear"))
1763
+ preds = cross_val_predict(classif, X, y, cv=10)
1764
+ preds_sparse = cross_val_predict(classif, X_sparse, y_sparse, cv=10)
1765
+ preds_sparse = preds_sparse.toarray()
1766
+ assert_array_almost_equal(preds_sparse, preds)
1767
+
1768
+
1769
+ def check_cross_val_predict_binary(est, X, y, method):
+     """Helper for tests of cross_val_predict with binary classification"""
+     cv = KFold(n_splits=3, shuffle=False)
+
+     # Generate expected outputs
+     if y.ndim == 1:
+         exp_shape = (len(X),) if method == "decision_function" else (len(X), 2)
+     else:
+         exp_shape = y.shape
+     expected_predictions = np.zeros(exp_shape)
+     for train, test in cv.split(X, y):
+         est = clone(est).fit(X[train], y[train])
+         expected_predictions[test] = getattr(est, method)(X[test])
+
+     # Check actual outputs for several representations of y
+     for tg in [y, y + 1, y - 2, y.astype("str")]:
+         assert_allclose(
+             cross_val_predict(est, X, tg, method=method, cv=cv), expected_predictions
+         )
+
+
+
1790
+ def check_cross_val_predict_multiclass(est, X, y, method):
1791
+ """Helper for tests of cross_val_predict with multiclass classification"""
1792
+ cv = KFold(n_splits=3, shuffle=False)
1793
+
1794
+ # Generate expected outputs
1795
+ float_min = np.finfo(np.float64).min
1796
+ default_values = {
1797
+ "decision_function": float_min,
1798
+ "predict_log_proba": float_min,
1799
+ "predict_proba": 0,
1800
+ }
1801
+ expected_predictions = np.full(
1802
+ (len(X), len(set(y))), default_values[method], dtype=np.float64
1803
+ )
1804
+ _, y_enc = np.unique(y, return_inverse=True)
1805
+ for train, test in cv.split(X, y_enc):
1806
+ est = clone(est).fit(X[train], y_enc[train])
1807
+ fold_preds = getattr(est, method)(X[test])
1808
+ i_cols_fit = np.unique(y_enc[train])
1809
+ expected_predictions[np.ix_(test, i_cols_fit)] = fold_preds
1810
+
1811
+ # Check actual outputs for several representations of y
1812
+ for tg in [y, y + 1, y - 2, y.astype("str")]:
1813
+ assert_allclose(
1814
+ cross_val_predict(est, X, tg, method=method, cv=cv), expected_predictions
1815
+ )
1816
+
1817
+
1818
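+ # Editor's note (rationale, not upstream code): the float_min / 0 defaults
+ # above mirror the neutral fill values cross_val_predict uses for classes
+ # that are absent from a training fold - zero probability, and the most
+ # negative representable score for decision_function / predict_log_proba.
+
+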
+ def check_cross_val_predict_multilabel(est, X, y, method):
+     """Check the output of cross_val_predict for 2D targets using
+     estimators which provide predictions as a list with one
+     element per class.
+     """
+     cv = KFold(n_splits=3, shuffle=False)
+
+     # Create empty arrays of the correct size to hold outputs
+     float_min = np.finfo(np.float64).min
+     default_values = {
+         "decision_function": float_min,
+         "predict_log_proba": float_min,
+         "predict_proba": 0,
+     }
+     n_targets = y.shape[1]
+     expected_preds = []
+     for i_col in range(n_targets):
+         n_classes_in_label = len(set(y[:, i_col]))
+         if n_classes_in_label == 2 and method == "decision_function":
+             exp_shape = (len(X),)
+         else:
+             exp_shape = (len(X), n_classes_in_label)
+         expected_preds.append(
+             np.full(exp_shape, default_values[method], dtype=np.float64)
+         )
+
+     # Generate expected outputs
+     y_enc_cols = [
+         np.unique(y[:, i], return_inverse=True)[1][:, np.newaxis]
+         for i in range(y.shape[1])
+     ]
+     y_enc = np.concatenate(y_enc_cols, axis=1)
+     for train, test in cv.split(X, y_enc):
+         est = clone(est).fit(X[train], y_enc[train])
+         fold_preds = getattr(est, method)(X[test])
+         for i_col in range(n_targets):
+             fold_cols = np.unique(y_enc[train][:, i_col])
+             if expected_preds[i_col].ndim == 1:
+                 # Decision function with <= 2 classes
+                 expected_preds[i_col][test] = fold_preds[i_col]
+             else:
+                 idx = np.ix_(test, fold_cols)
+                 expected_preds[i_col][idx] = fold_preds[i_col]
+
+     # Check actual outputs for several representations of y
+     for tg in [y, y + 1, y - 2, y.astype("str")]:
+         cv_predict_output = cross_val_predict(est, X, tg, method=method, cv=cv)
+         assert len(cv_predict_output) == len(expected_preds)
+         for i in range(len(cv_predict_output)):
+             assert_allclose(cv_predict_output[i], expected_preds[i])
+
+
+ def check_cross_val_predict_with_method_binary(est):
+     # This test includes the decision_function with two classes.
+     # This is a special case: it has only one column of output.
+     X, y = make_classification(n_classes=2, random_state=0)
+     for method in ["decision_function", "predict_proba", "predict_log_proba"]:
+         check_cross_val_predict_binary(est, X, y, method)
+
+
+ def check_cross_val_predict_with_method_multiclass(est):
+     iris = load_iris()
+     X, y = iris.data, iris.target
+     X, y = shuffle(X, y, random_state=0)
+     for method in ["decision_function", "predict_proba", "predict_log_proba"]:
+         check_cross_val_predict_multiclass(est, X, y, method)
+
+
+ def test_cross_val_predict_with_method():
+     check_cross_val_predict_with_method_binary(LogisticRegression(solver="liblinear"))
+     check_cross_val_predict_with_method_multiclass(
+         LogisticRegression(solver="liblinear")
+     )
+
+
+ def test_cross_val_predict_method_checking():
+     # Regression test for issue #9639. Tests that cross_val_predict does not
+     # check estimator methods (e.g. predict_proba) before fitting
+     iris = load_iris()
+     X, y = iris.data, iris.target
+     X, y = shuffle(X, y, random_state=0)
+     for method in ["decision_function", "predict_proba", "predict_log_proba"]:
+         est = SGDClassifier(loss="log_loss", random_state=2)
+         check_cross_val_predict_multiclass(est, X, y, method)
+
+
+ def test_gridsearchcv_cross_val_predict_with_method():
+     iris = load_iris()
+     X, y = iris.data, iris.target
+     X, y = shuffle(X, y, random_state=0)
+     est = GridSearchCV(
+         LogisticRegression(random_state=42, solver="liblinear"), {"C": [0.1, 1]}, cv=2
+     )
+     for method in ["decision_function", "predict_proba", "predict_log_proba"]:
+         check_cross_val_predict_multiclass(est, X, y, method)
+
+
+ def test_cross_val_predict_with_method_multilabel_ovr():
1916
+ # OVR does multilabel predictions, but only for arrays of
1917
+ # binary indicator columns. The output of predict_proba
1918
+ # is a 2D array with shape (n_samples, n_classes).
1919
+ n_samp = 100
1920
+ n_classes = 4
1921
+ X, y = make_multilabel_classification(
1922
+ n_samples=n_samp, n_labels=3, n_classes=n_classes, n_features=5, random_state=42
1923
+ )
1924
+ est = OneVsRestClassifier(LogisticRegression(solver="liblinear", random_state=0))
1925
+ for method in ["predict_proba", "decision_function"]:
1926
+ check_cross_val_predict_binary(est, X, y, method=method)
1927
+
1928
+
1929
+ class RFWithDecisionFunction(RandomForestClassifier):
1930
+ # None of the current multioutput-multiclass estimators have
1931
+ # decision function methods. Create a mock decision function
1932
+ # to test the cross_val_predict function's handling of this case.
1933
+ def decision_function(self, X):
1934
+ probs = self.predict_proba(X)
1935
+ msg = "This helper should only be used on multioutput-multiclass tasks"
1936
+ assert isinstance(probs, list), msg
1937
+ probs = [p[:, -1] if p.shape[1] == 2 else p for p in probs]
1938
+ return probs
1939
+
1940
+
1941
+ def test_cross_val_predict_with_method_multilabel_rf():
1942
+ # The RandomForest allows multiple classes in each label.
1943
+ # Output of predict_proba is a list of outputs of predict_proba
1944
+ # for each individual label.
1945
+ n_classes = 4
1946
+ X, y = make_multilabel_classification(
1947
+ n_samples=100, n_labels=3, n_classes=n_classes, n_features=5, random_state=42
1948
+ )
1949
+ y[:, 0] += y[:, 1] # Put three classes in the first column
1950
+ for method in ["predict_proba", "predict_log_proba", "decision_function"]:
1951
+ est = RFWithDecisionFunction(n_estimators=5, random_state=0)
1952
+ with warnings.catch_warnings():
1953
+ # Suppress "RuntimeWarning: divide by zero encountered in log"
1954
+ warnings.simplefilter("ignore")
1955
+ check_cross_val_predict_multilabel(est, X, y, method=method)
1956
+
1957
+
1958
+ def test_cross_val_predict_with_method_rare_class():
1959
+ # Test a multiclass problem where one class will be missing from
1960
+ # one of the CV training sets.
1961
+ rng = np.random.RandomState(0)
1962
+ X = rng.normal(0, 1, size=(14, 10))
1963
+ y = np.array([0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 3])
1964
+ est = LogisticRegression(solver="liblinear")
1965
+ for method in ["predict_proba", "predict_log_proba", "decision_function"]:
1966
+ with warnings.catch_warnings():
1967
+ # Suppress warning about too few examples of a class
1968
+ warnings.simplefilter("ignore")
1969
+ check_cross_val_predict_multiclass(est, X, y, method)
1970
+
1971
+
1972
+ def test_cross_val_predict_with_method_multilabel_rf_rare_class():
1973
+ # The RandomForest allows anything for the contents of the labels.
1974
+ # Output of predict_proba is a list of outputs of predict_proba
1975
+ # for each individual label.
1976
+ # In this test, the first label has a class with a single example.
1977
+ # We'll have one CV fold where the training data don't include it.
1978
+ rng = np.random.RandomState(0)
1979
+ X = rng.normal(0, 1, size=(5, 10))
1980
+ y = np.array([[0, 0], [1, 1], [2, 1], [0, 1], [1, 0]])
1981
+ for method in ["predict_proba", "predict_log_proba"]:
1982
+ est = RFWithDecisionFunction(n_estimators=5, random_state=0)
1983
+ with warnings.catch_warnings():
1984
+ # Suppress "RuntimeWarning: divide by zero encountered in log"
1985
+ warnings.simplefilter("ignore")
1986
+ check_cross_val_predict_multilabel(est, X, y, method=method)
1987
+
1988
+
1989
+ def get_expected_predictions(X, y, cv, classes, est, method):
1990
+ expected_predictions = np.zeros([len(y), classes])
1991
+ func = getattr(est, method)
1992
+
1993
+ for train, test in cv.split(X, y):
1994
+ est.fit(X[train], y[train])
1995
+ expected_predictions_ = func(X[test])
1996
+ # To avoid 2 dimensional indexing
1997
+ if method == "predict_proba":
1998
+ exp_pred_test = np.zeros((len(test), classes))
1999
+ else:
2000
+ exp_pred_test = np.full(
2001
+ (len(test), classes), np.finfo(expected_predictions.dtype).min
2002
+ )
2003
+ exp_pred_test[:, est.classes_] = expected_predictions_
2004
+ expected_predictions[test] = exp_pred_test
2005
+
2006
+ return expected_predictions
2007
+
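+ # For example (hypothetical values): with classes=4 and est.classes_ == [0, 2],
+ # a fold prediction row [0.7, 0.3] is scattered to [0.7, fill, 0.3, fill],
+ # where fill is 0 for predict_proba and the float64 minimum otherwise.
+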
2008
+
2009
+ def test_cross_val_predict_class_subset():
2010
+ X = np.arange(200).reshape(100, 2)
2011
+ y = np.array([x // 10 for x in range(100)])
2012
+ classes = 10
2013
+
2014
+ kfold3 = KFold(n_splits=3)
2015
+ kfold4 = KFold(n_splits=4)
2016
+
2017
+ le = LabelEncoder()
2018
+
2019
+ methods = ["decision_function", "predict_proba", "predict_log_proba"]
2020
+ for method in methods:
2021
+ est = LogisticRegression(solver="liblinear")
2022
+
2023
+ # Test with n_splits=3
2024
+ predictions = cross_val_predict(est, X, y, method=method, cv=kfold3)
2025
+
2026
+ # Run a naive loop (should give the same result as cross_val_predict):
2027
+ expected_predictions = get_expected_predictions(
2028
+ X, y, kfold3, classes, est, method
2029
+ )
2030
+ assert_array_almost_equal(expected_predictions, predictions)
2031
+
2032
+ # Test with n_splits=4
2033
+ predictions = cross_val_predict(est, X, y, method=method, cv=kfold4)
2034
+ expected_predictions = get_expected_predictions(
2035
+ X, y, kfold4, classes, est, method
2036
+ )
2037
+ assert_array_almost_equal(expected_predictions, predictions)
2038
+
2039
+ # Testing unordered labels
2040
+ y = shuffle(np.repeat(range(10), 10), random_state=0)
2041
+ predictions = cross_val_predict(est, X, y, method=method, cv=kfold3)
2042
+ y = le.fit_transform(y)
2043
+ expected_predictions = get_expected_predictions(
2044
+ X, y, kfold3, classes, est, method
2045
+ )
2046
+ assert_array_almost_equal(expected_predictions, predictions)
2047
+
2048
+
2049
+ def test_score_memmap():
2050
+ # Ensure a scalar score of memmap type is accepted
2051
+ iris = load_iris()
2052
+ X, y = iris.data, iris.target
2053
+ clf = MockClassifier()
2054
+ tf = tempfile.NamedTemporaryFile(mode="wb", delete=False)
2055
+ tf.write(b"Hello world!!!!!")
2056
+ tf.close()
2057
+ scores = np.memmap(tf.name, dtype=np.float64)
2058
+ score = np.memmap(tf.name, shape=(), mode="r", dtype=np.float64)
2059
+ try:
2060
+ cross_val_score(clf, X, y, scoring=lambda est, X, y: score)
2061
+ with pytest.raises(ValueError):
2062
+ cross_val_score(clf, X, y, scoring=lambda est, X, y: scores)
2063
+ finally:
2064
+ # Best effort to release the mmap file handles before deleting the
2065
+ # backing file under Windows
2066
+ scores, score = None, None
2067
+ for _ in range(3):
2068
+ try:
2069
+ os.unlink(tf.name)
2070
+ break
2071
+ except OSError:
2072
+ sleep(1.0)
2073
+
2074
+
2075
+ @pytest.mark.filterwarnings("ignore: Using or importing the ABCs from")
2076
+ def test_permutation_test_score_pandas():
2077
+ # check permutation_test_score doesn't destroy pandas dataframe
2078
+ types = [(MockDataFrame, MockDataFrame)]
2079
+ try:
2080
+ from pandas import DataFrame, Series
2081
+
2082
+ types.append((Series, DataFrame))
2083
+ except ImportError:
2084
+ pass
2085
+ for TargetType, InputFeatureType in types:
2086
+ # X dataframe, y series
2087
+ iris = load_iris()
2088
+ X, y = iris.data, iris.target
2089
+ X_df, y_ser = InputFeatureType(X), TargetType(y)
2090
+ check_df = lambda x: isinstance(x, InputFeatureType)
2091
+ check_series = lambda x: isinstance(x, TargetType)
2092
+ clf = CheckingClassifier(check_X=check_df, check_y=check_series)
2093
+ permutation_test_score(clf, X_df, y_ser)
2094
+
2095
+
2096
+ def test_fit_and_score_failing():
2097
+ # Create a failing classifier to deliberately fail
2098
+ failing_clf = FailingClassifier(FailingClassifier.FAILING_PARAMETER)
2099
+ # dummy X data
2100
+ X = np.arange(1, 10)
2101
+ fit_and_score_args = dict(
2102
+ estimator=failing_clf,
2103
+ X=X,
2104
+ y=None,
2105
+ scorer=dict(),
2106
+ train=None,
2107
+ test=None,
2108
+ verbose=0,
2109
+ parameters=None,
2110
+ fit_params=None,
2111
+ score_params=None,
2112
+ )
2113
+ # passing error score to trigger the warning message
2114
+ fit_and_score_args["error_score"] = "raise"
2115
+ # check if exception was raised, with default error_score='raise'
2116
+ with pytest.raises(ValueError, match="Failing classifier failed as required"):
2117
+ _fit_and_score(**fit_and_score_args)
2118
+
2119
+ assert failing_clf.score() == 0.0 # FailingClassifier coverage
2120
+
2121
+
2122
+ def test_fit_and_score_working():
2123
+ X, y = make_classification(n_samples=30, random_state=0)
2124
+ clf = SVC(kernel="linear", random_state=0)
2125
+ train, test = next(ShuffleSplit().split(X))
2126
+ # Test return_parameters option
2127
+ fit_and_score_args = dict(
2128
+ estimator=clf,
2129
+ X=X,
2130
+ y=y,
2131
+ scorer=dict(),
2132
+ train=train,
2133
+ test=test,
2134
+ verbose=0,
2135
+ parameters={"max_iter": 100, "tol": 0.1},
2136
+ fit_params=None,
2137
+ score_params=None,
2138
+ return_parameters=True,
2139
+ )
2140
+ result = _fit_and_score(**fit_and_score_args)
2141
+ assert result["parameters"] == fit_and_score_args["parameters"]
2142
+
2143
+
2144
+ class DataDependentFailingClassifier(BaseEstimator):
2145
+ def __init__(self, max_x_value=None):
2146
+ self.max_x_value = max_x_value
2147
+
2148
+ def fit(self, X, y=None):
2149
+ num_values_too_high = (X > self.max_x_value).sum()
2150
+ if num_values_too_high:
2151
+ raise ValueError(
2152
+ f"Classifier fit failed with {num_values_too_high} values too high"
2153
+ )
2154
+
2155
+ def score(self, X=None, Y=None):
2156
+ return 0.0
2157
+
2158
+
2159
+ @pytest.mark.parametrize("error_score", [np.nan, 0])
2160
+ def test_cross_validate_some_failing_fits_warning(error_score):
2161
+ # Create a failing classifier to deliberately fail
2162
+ failing_clf = DataDependentFailingClassifier(max_x_value=8)
2163
+ # dummy X data
2164
+ X = np.arange(1, 10)
2165
+ y = np.ones(9)
2166
+ # passing error score to trigger the warning message
2167
+ cross_validate_args = [failing_clf, X, y]
2168
+ cross_validate_kwargs = {"cv": 3, "error_score": error_score}
2169
+ # check if the warning message type is as expected
2170
+
2171
+ individual_fit_error_message = (
2172
+ "ValueError: Classifier fit failed with 1 values too high"
2173
+ )
2174
+ warning_message = re.compile(
2175
+ (
2176
+ "2 fits failed.+total of 3.+The score on these"
2177
+ " train-test partitions for these parameters will be set to"
2178
+ f" {cross_validate_kwargs['error_score']}.+{individual_fit_error_message}"
2179
+ ),
2180
+ flags=re.DOTALL,
2181
+ )
2182
+
2183
+ with pytest.warns(FitFailedWarning, match=warning_message):
2184
+ cross_validate(*cross_validate_args, **cross_validate_kwargs)
2185
+
2186
+
2187
+ @pytest.mark.parametrize("error_score", [np.nan, 0])
2188
+ def test_cross_validate_all_failing_fits_error(error_score):
2189
+ # Create a failing classifier to deliberately fail
2190
+ failing_clf = FailingClassifier(FailingClassifier.FAILING_PARAMETER)
2191
+ # dummy X data
2192
+ X = np.arange(1, 10)
2193
+ y = np.ones(9)
2194
+
2195
+ cross_validate_args = [failing_clf, X, y]
2196
+ cross_validate_kwargs = {"cv": 7, "error_score": error_score}
2197
+
2198
+ individual_fit_error_message = "ValueError: Failing classifier failed as required"
2199
+ error_message = re.compile(
2200
+ (
2201
+ "All the 7 fits failed.+your model is misconfigured.+"
2202
+ f"{individual_fit_error_message}"
2203
+ ),
2204
+ flags=re.DOTALL,
2205
+ )
2206
+
2207
+ with pytest.raises(ValueError, match=error_message):
2208
+ cross_validate(*cross_validate_args, **cross_validate_kwargs)
2209
+
2210
+
2211
+ def _failing_scorer(estimator, X, y, error_msg):
2212
+ raise ValueError(error_msg)
2213
+
2214
+
2215
+ @pytest.mark.filterwarnings("ignore:lbfgs failed to converge")
2216
+ @pytest.mark.parametrize("error_score", [np.nan, 0, "raise"])
2217
+ def test_cross_val_score_failing_scorer(error_score):
2218
+ # check that an estimator can fail during scoring in `cross_val_score` and
2219
+ # that we can optionally replace it with `error_score`
2220
+ X, y = load_iris(return_X_y=True)
2221
+ clf = LogisticRegression(max_iter=5).fit(X, y)
2222
+
2223
+ error_msg = "This scorer is supposed to fail!!!"
2224
+ failing_scorer = partial(_failing_scorer, error_msg=error_msg)
2225
+
2226
+ if error_score == "raise":
2227
+ with pytest.raises(ValueError, match=error_msg):
2228
+ cross_val_score(
2229
+ clf, X, y, cv=3, scoring=failing_scorer, error_score=error_score
2230
+ )
2231
+ else:
2232
+ warning_msg = (
2233
+ "Scoring failed. The score on this train-test partition for "
2234
+ f"these parameters will be set to {error_score}"
2235
+ )
2236
+ with pytest.warns(UserWarning, match=warning_msg):
2237
+ scores = cross_val_score(
2238
+ clf, X, y, cv=3, scoring=failing_scorer, error_score=error_score
2239
+ )
2240
+ assert_allclose(scores, error_score)
2241
+
2242
+
2243
+ @pytest.mark.filterwarnings("ignore:lbfgs failed to converge")
2244
+ @pytest.mark.parametrize("error_score", [np.nan, 0, "raise"])
2245
+ @pytest.mark.parametrize("return_train_score", [True, False])
2246
+ @pytest.mark.parametrize("with_multimetric", [False, True])
2247
+ def test_cross_validate_failing_scorer(
2248
+ error_score, return_train_score, with_multimetric
2249
+ ):
2250
+ # Check that an estimator can fail during scoring in `cross_validate` and
2251
+ # that we can optionally replace it with `error_score`. In the multimetric
2252
+ # case also check the result of a non-failing scorer where the other scorers
2253
+ # are failing.
2254
+ X, y = load_iris(return_X_y=True)
2255
+ clf = LogisticRegression(max_iter=5).fit(X, y)
2256
+
2257
+ error_msg = "This scorer is supposed to fail!!!"
2258
+ failing_scorer = partial(_failing_scorer, error_msg=error_msg)
2259
+ if with_multimetric:
2260
+ non_failing_scorer = make_scorer(mean_squared_error)
2261
+ scoring = {
2262
+ "score_1": failing_scorer,
2263
+ "score_2": non_failing_scorer,
2264
+ "score_3": failing_scorer,
2265
+ }
2266
+ else:
2267
+ scoring = failing_scorer
2268
+
2269
+ if error_score == "raise":
2270
+ with pytest.raises(ValueError, match=error_msg):
2271
+ cross_validate(
2272
+ clf,
2273
+ X,
2274
+ y,
2275
+ cv=3,
2276
+ scoring=scoring,
2277
+ return_train_score=return_train_score,
2278
+ error_score=error_score,
2279
+ )
2280
+ else:
2281
+ warning_msg = (
2282
+ "Scoring failed. The score on this train-test partition for "
2283
+ f"these parameters will be set to {error_score}"
2284
+ )
2285
+ with pytest.warns(UserWarning, match=warning_msg):
2286
+ results = cross_validate(
2287
+ clf,
2288
+ X,
2289
+ y,
2290
+ cv=3,
2291
+ scoring=scoring,
2292
+ return_train_score=return_train_score,
2293
+ error_score=error_score,
2294
+ )
2295
+ for key in results:
2296
+ if "_score" in key:
2297
+ if "_score_2" in key:
2298
+ # check the test (and optionally train) score for the
2299
+ # scorer that should be non-failing
2300
+ for i in results[key]:
2301
+ assert isinstance(i, float)
2302
+ else:
2303
+ # check the test (and optionally train) score for all
2304
+ # scorers that should be assigned to `error_score`.
2305
+ assert_allclose(results[key], error_score)
2306
+
2307
+
2308
+ def three_params_scorer(i, j, k):
2309
+ return 3.4213
2310
+
2311
+
2312
+ @pytest.mark.parametrize(
2313
+ "train_score, scorer, verbose, split_prg, cdt_prg, expected",
2314
+ [
2315
+ (
2316
+ False,
2317
+ three_params_scorer,
2318
+ 2,
2319
+ (1, 3),
2320
+ (0, 1),
2321
+ r"\[CV\] END ...................................................."
2322
+ r" total time= 0.\ds",
2323
+ ),
2324
+ (
2325
+ True,
2326
+ {"sc1": three_params_scorer, "sc2": three_params_scorer},
2327
+ 3,
2328
+ (1, 3),
2329
+ (0, 1),
2330
+ r"\[CV 2/3\] END sc1: \(train=3.421, test=3.421\) sc2: "
2331
+ r"\(train=3.421, test=3.421\) total time= 0.\ds",
2332
+ ),
2333
+ (
2334
+ False,
2335
+ {"sc1": three_params_scorer, "sc2": three_params_scorer},
2336
+ 10,
2337
+ (1, 3),
2338
+ (0, 1),
2339
+ r"\[CV 2/3; 1/1\] END ....... sc1: \(test=3.421\) sc2: \(test=3.421\)"
2340
+ r" total time= 0.\ds",
2341
+ ),
2342
+ ],
2343
+ )
2344
+ def test_fit_and_score_verbosity(
2345
+ capsys, train_score, scorer, verbose, split_prg, cdt_prg, expected
2346
+ ):
2347
+ X, y = make_classification(n_samples=30, random_state=0)
2348
+ clf = SVC(kernel="linear", random_state=0)
2349
+ train, test = next(ShuffleSplit().split(X))
2350
+
2351
+ # test print without train score
2352
+ fit_and_score_args = dict(
2353
+ estimator=clf,
2354
+ X=X,
2355
+ y=y,
2356
+ scorer=scorer,
2357
+ train=train,
2358
+ test=test,
2359
+ verbose=verbose,
2360
+ parameters=None,
2361
+ fit_params=None,
2362
+ score_params=None,
2363
+ return_train_score=train_score,
2364
+ split_progress=split_prg,
2365
+ candidate_progress=cdt_prg,
2366
+ )
2367
+ _fit_and_score(**fit_and_score_args)
2368
+ out, _ = capsys.readouterr()
2369
+ outlines = out.split("\n")
2370
+ if len(outlines) > 2:
2371
+ assert re.match(expected, outlines[1])
2372
+ else:
2373
+ assert re.match(expected, outlines[0])
2374
+
2375
+
2376
+ def test_score():
2377
+ error_message = "scoring must return a number, got None"
2378
+
2379
+ def two_params_scorer(estimator, X_test):
2380
+ return None
2381
+
2382
+ with pytest.raises(ValueError, match=error_message):
2383
+ _score(
2384
+ estimator=None,
2385
+ X_test=None,
2386
+ y_test=None,
2387
+ scorer=two_params_scorer,
2388
+ score_params=None,
2389
+ error_score=np.nan,
2390
+ )
2391
+
2392
+
2393
+ def test_callable_multimetric_confusion_matrix_cross_validate():
2394
+ def custom_scorer(clf, X, y):
2395
+ y_pred = clf.predict(X)
2396
+ cm = confusion_matrix(y, y_pred)
2397
+ return {"tn": cm[0, 0], "fp": cm[0, 1], "fn": cm[1, 0], "tp": cm[1, 1]}
2398
+
2399
+ X, y = make_classification(n_samples=40, n_features=4, random_state=42)
2400
+ est = LinearSVC(dual="auto", random_state=42)
2401
+ est.fit(X, y)
2402
+ cv_results = cross_validate(est, X, y, cv=5, scoring=custom_scorer)
2403
+
2404
+ score_names = ["tn", "fp", "fn", "tp"]
2405
+ for name in score_names:
2406
+ assert "test_{}".format(name) in cv_results
2407
+
2408
+
2409
+ def test_learning_curve_partial_fit_regressors():
2410
+ """Check that regressors with partial_fit are supported.
2411
+
2412
+ Non-regression test for #22981.
2413
+ """
2414
+ X, y = make_regression(random_state=42)
2415
+
2416
+ # Does not error
2417
+ learning_curve(MLPRegressor(), X, y, exploit_incremental_learning=True, cv=2)
2418
+
2419
+
2420
+ def test_learning_curve_some_failing_fits_warning(global_random_seed):
2421
+ """Check that fit failures in `learning_curve` raise the required warning."""
2422
+
2423
+ X, y = make_classification(
2424
+ n_samples=30,
2425
+ n_classes=3,
2426
+ n_informative=6,
2427
+ shuffle=False,
2428
+ random_state=global_random_seed,
2429
+ )
2430
+ # sorting the target to trigger SVC error on the 2 first splits because a single
2431
+ # class is present
2432
+ sorted_idx = np.argsort(y)
2433
+ X, y = X[sorted_idx], y[sorted_idx]
2434
+
2435
+ svc = SVC()
2436
+ warning_message = "10 fits failed out of a total of 25"
2437
+
2438
+ with pytest.warns(FitFailedWarning, match=warning_message):
2439
+ _, train_score, test_score, *_ = learning_curve(
2440
+ svc, X, y, cv=5, error_score=np.nan
2441
+ )
2442
+
2443
+ # the first 2 splits should lead to warnings and thus np.nan scores
2444
+ for idx in range(2):
2445
+ assert np.isnan(train_score[idx]).all()
2446
+ assert np.isnan(test_score[idx]).all()
2447
+
2448
+ for idx in range(2, train_score.shape[0]):
2449
+ assert not np.isnan(train_score[idx]).any()
2450
+ assert not np.isnan(test_score[idx]).any()
2451
+
2452
+
2453
+ def test_cross_validate_return_indices(global_random_seed):
2454
+ """Check the behaviour of `return_indices` in `cross_validate`."""
2455
+ X, y = load_iris(return_X_y=True)
2456
+ X = scale(X) # scale features for better convergence
2457
+ estimator = LogisticRegression()
2458
+
2459
+ cv = KFold(n_splits=3, shuffle=True, random_state=global_random_seed)
2460
+ cv_results = cross_validate(estimator, X, y, cv=cv, n_jobs=2, return_indices=False)
2461
+ assert "indices" not in cv_results
2462
+
2463
+ cv_results = cross_validate(estimator, X, y, cv=cv, n_jobs=2, return_indices=True)
2464
+ assert "indices" in cv_results
2465
+ train_indices = cv_results["indices"]["train"]
2466
+ test_indices = cv_results["indices"]["test"]
2467
+ assert len(train_indices) == cv.n_splits
2468
+ assert len(test_indices) == cv.n_splits
2469
+
2470
+ assert_array_equal([indices.size for indices in train_indices], 100)
2471
+ assert_array_equal([indices.size for indices in test_indices], 50)
2472
+
2473
+ for split_idx, (expected_train_idx, expected_test_idx) in enumerate(cv.split(X, y)):
2474
+ assert_array_equal(train_indices[split_idx], expected_train_idx)
2475
+ assert_array_equal(test_indices[split_idx], expected_test_idx)
2476
+
2477
+
2478
+ # Tests for metadata routing in cross_val*
2479
+ # ========================================
2480
+
2481
+
2482
+ # TODO(1.6): remove this test in 1.6
2483
+ def test_cross_validate_fit_param_deprecation():
2484
+ """Check that we warn about deprecating `fit_params`."""
2485
+ with pytest.warns(FutureWarning, match="`fit_params` is deprecated"):
2486
+ cross_validate(estimator=ConsumingClassifier(), X=X, y=y, cv=2, fit_params={})
2487
+
2488
+ with pytest.raises(
2489
+ ValueError, match="`params` and `fit_params` cannot both be provided"
2490
+ ):
2491
+ cross_validate(
2492
+ estimator=ConsumingClassifier(), X=X, y=y, fit_params={}, params={}
2493
+ )
2494
+
2495
+
2496
+ @pytest.mark.usefixtures("enable_slep006")
2497
+ @pytest.mark.parametrize(
2498
+ "cv_method", [cross_validate, cross_val_score, cross_val_predict]
2499
+ )
2500
+ def test_groups_with_routing_validation(cv_method):
2501
+ """Check that we raise an error if `groups` are passed to the cv method instead
2502
+ of `params` when metadata routing is enabled.
2503
+ """
2504
+ with pytest.raises(ValueError, match="`groups` can only be passed if"):
2505
+ cv_method(
2506
+ estimator=ConsumingClassifier(),
2507
+ X=X,
2508
+ y=y,
2509
+ groups=[],
2510
+ )
2511
+
2512
+
2513
+ @pytest.mark.usefixtures("enable_slep006")
2514
+ @pytest.mark.parametrize(
2515
+ "cv_method", [cross_validate, cross_val_score, cross_val_predict]
2516
+ )
2517
+ def test_passed_unrequested_metadata(cv_method):
2518
+ """Check that we raise an error when passing metadata that is not
2519
+ requested."""
2520
+ err_msg = re.escape("but are not explicitly set as requested or not requested")
2521
+ with pytest.raises(ValueError, match=err_msg):
2522
+ cv_method(
2523
+ estimator=ConsumingClassifier(),
2524
+ X=X,
2525
+ y=y,
2526
+ params=dict(metadata=[]),
2527
+ )
2528
+
2529
+
2530
+ @pytest.mark.usefixtures("enable_slep006")
2531
+ @pytest.mark.parametrize(
2532
+ "cv_method", [cross_validate, cross_val_score, cross_val_predict]
2533
+ )
2534
+ def test_cross_validate_routing(cv_method):
2535
+ """Check that the respective cv method is properly dispatching the metadata
2536
+ to the consumer."""
2537
+ scorer_registry = _Registry()
2538
+ scorer = ConsumingScorer(registry=scorer_registry).set_score_request(
2539
+ sample_weight="score_weights", metadata="score_metadata"
2540
+ )
2541
+ splitter_registry = _Registry()
2542
+ splitter = ConsumingSplitter(registry=splitter_registry).set_split_request(
2543
+ groups="split_groups", metadata="split_metadata"
2544
+ )
2545
+ estimator_registry = _Registry()
2546
+ estimator = ConsumingClassifier(registry=estimator_registry).set_fit_request(
2547
+ sample_weight="fit_sample_weight", metadata="fit_metadata"
2548
+ )
2549
+ n_samples = _num_samples(X)
2550
+ rng = np.random.RandomState(0)
2551
+ score_weights = rng.rand(n_samples)
2552
+ score_metadata = rng.rand(n_samples)
2553
+ split_groups = rng.randint(0, 3, n_samples)
2554
+ split_metadata = rng.rand(n_samples)
2555
+ fit_sample_weight = rng.rand(n_samples)
2556
+ fit_metadata = rng.rand(n_samples)
2557
+
2558
+ extra_params = {
2559
+ cross_validate: dict(scoring=dict(my_scorer=scorer, accuracy="accuracy")),
2560
+ # cross_val_score doesn't support multiple scorers
2561
+ cross_val_score: dict(scoring=scorer),
2562
+ # cross_val_predict doesn't need a scorer
2563
+ cross_val_predict: dict(),
2564
+ }
2565
+
2566
+ params = dict(
2567
+ split_groups=split_groups,
2568
+ split_metadata=split_metadata,
2569
+ fit_sample_weight=fit_sample_weight,
2570
+ fit_metadata=fit_metadata,
2571
+ )
2572
+
2573
+ if cv_method is not cross_val_predict:
2574
+ params.update(
2575
+ score_weights=score_weights,
2576
+ score_metadata=score_metadata,
2577
+ )
2578
+
2579
+ cv_method(
2580
+ estimator,
2581
+ X=X,
2582
+ y=y,
2583
+ cv=splitter,
2584
+ **extra_params[cv_method],
2585
+ params=params,
2586
+ )
2587
+
2588
+ if cv_method is not cross_val_predict:
2589
+ # cross_val_predict doesn't need a scorer
2590
+ assert len(scorer_registry)
2591
+ for _scorer in scorer_registry:
2592
+ check_recorded_metadata(
2593
+ obj=_scorer,
2594
+ method="score",
2595
+ split_params=("sample_weight", "metadata"),
2596
+ sample_weight=score_weights,
2597
+ metadata=score_metadata,
2598
+ )
2599
+
2600
+ assert len(splitter_registry)
2601
+ for _splitter in splitter_registry:
2602
+ check_recorded_metadata(
2603
+ obj=_splitter,
2604
+ method="split",
2605
+ groups=split_groups,
2606
+ metadata=split_metadata,
2607
+ )
2608
+
2609
+ assert len(estimator_registry)
2610
+ for _estimator in estimator_registry:
2611
+ check_recorded_metadata(
2612
+ obj=_estimator,
2613
+ method="fit",
2614
+ split_params=("sample_weight", "metadata"),
2615
+ sample_weight=fit_sample_weight,
2616
+ metadata=fit_metadata,
2617
+ )
2618
+
2619
+
2620
+ # End of metadata routing tests
2621
+ # =============================
env-llmeval/lib/python3.10/site-packages/sklearn/utils/__init__.py ADDED
@@ -0,0 +1,1299 @@
1
+ """
2
+ The :mod:`sklearn.utils` module includes various utilities.
3
+ """
4
+
5
+ import math
6
+ import numbers
7
+ import platform
8
+ import struct
9
+ import timeit
10
+ import warnings
11
+ from collections.abc import Sequence
12
+ from contextlib import contextmanager, suppress
13
+ from itertools import compress, islice
14
+
15
+ import numpy as np
16
+ from scipy.sparse import issparse
17
+
18
+ from .. import get_config
19
+ from ..exceptions import DataConversionWarning
20
+ from . import _joblib, metadata_routing
21
+ from ._bunch import Bunch
22
+ from ._estimator_html_repr import estimator_html_repr
23
+ from ._param_validation import Integral, Interval, validate_params
24
+ from .class_weight import compute_class_weight, compute_sample_weight
25
+ from .deprecation import deprecated
26
+ from .discovery import all_estimators
27
+ from .fixes import parse_version, threadpool_info
28
+ from .murmurhash import murmurhash3_32
29
+ from .validation import (
30
+ _is_arraylike_not_scalar,
31
+ _is_pandas_df,
32
+ _is_polars_df,
33
+ _use_interchange_protocol,
34
+ as_float_array,
35
+ assert_all_finite,
36
+ check_array,
37
+ check_consistent_length,
38
+ check_random_state,
39
+ check_scalar,
40
+ check_symmetric,
41
+ check_X_y,
42
+ column_or_1d,
43
+ indexable,
44
+ )
45
+
46
+ # Do not deprecate parallel_backend and register_parallel_backend as they are
47
+ # needed to tune `scikit-learn` behavior and have a different effect if called
48
+ # from the vendored version or the site-package version. The others are
49
+ # utilities that are independent of scikit-learn so they are not part of
50
+ # scikit-learn public API.
51
+ parallel_backend = _joblib.parallel_backend
52
+ register_parallel_backend = _joblib.register_parallel_backend
53
+
54
+ __all__ = [
55
+ "murmurhash3_32",
56
+ "as_float_array",
57
+ "assert_all_finite",
58
+ "check_array",
59
+ "check_random_state",
60
+ "compute_class_weight",
61
+ "compute_sample_weight",
62
+ "column_or_1d",
63
+ "check_consistent_length",
64
+ "check_X_y",
65
+ "check_scalar",
66
+ "indexable",
67
+ "check_symmetric",
68
+ "indices_to_mask",
69
+ "deprecated",
70
+ "parallel_backend",
71
+ "register_parallel_backend",
72
+ "resample",
73
+ "shuffle",
74
+ "check_matplotlib_support",
75
+ "all_estimators",
76
+ "DataConversionWarning",
77
+ "estimator_html_repr",
78
+ "Bunch",
79
+ "metadata_routing",
80
+ ]
81
+
82
+ IS_PYPY = platform.python_implementation() == "PyPy"
83
+ _IS_32BIT = 8 * struct.calcsize("P") == 32
84
+ _IS_WASM = platform.machine() in ["wasm32", "wasm64"]
85
+
86
+
87
+ def _in_unstable_openblas_configuration():
88
+ """Return True if in an unstable configuration for OpenBLAS"""
89
+
90
+ # Import libraries which might load OpenBLAS.
91
+ import numpy # noqa
92
+ import scipy # noqa
93
+
94
+ modules_info = threadpool_info()
95
+
96
+ open_blas_used = any(info["internal_api"] == "openblas" for info in modules_info)
97
+ if not open_blas_used:
98
+ return False
99
+
100
+ # OpenBLAS 0.3.16 fixed instability for arm64, see:
101
+ # https://github.com/xianyi/OpenBLAS/blob/1b6db3dbba672b4f8af935bd43a1ff6cff4d20b7/Changelog.txt#L56-L58 # noqa
102
+ openblas_arm64_stable_version = parse_version("0.3.16")
103
+ for info in modules_info:
104
+ if info["internal_api"] != "openblas":
105
+ continue
106
+ openblas_version = info.get("version")
107
+ openblas_architecture = info.get("architecture")
108
+ if openblas_version is None or openblas_architecture is None:
109
+ # Cannot be sure that OpenBLAS is good enough. Assume unstable:
110
+ return True
111
+ if (
112
+ openblas_architecture == "neoversen1"
113
+ and parse_version(openblas_version) < openblas_arm64_stable_version
114
+ ):
115
+ # See discussions in https://github.com/numpy/numpy/issues/19411
116
+ return True
117
+ return False
118
+
119
+
120
+ @validate_params(
121
+ {
122
+ "X": ["array-like", "sparse matrix"],
123
+ "mask": ["array-like"],
124
+ },
125
+ prefer_skip_nested_validation=True,
126
+ )
127
+ def safe_mask(X, mask):
128
+ """Return a mask which is safe to use on X.
129
+
130
+ Parameters
131
+ ----------
132
+ X : {array-like, sparse matrix}
133
+ Data on which to apply mask.
134
+
135
+ mask : array-like
136
+ Mask to be used on X.
137
+
138
+ Returns
139
+ -------
140
+ mask : ndarray
141
+ Array that is safe to use on X.
142
+
143
+ Examples
144
+ --------
145
+ >>> from sklearn.utils import safe_mask
146
+ >>> from scipy.sparse import csr_matrix
147
+ >>> data = csr_matrix([[1], [2], [3], [4], [5]])
148
+ >>> condition = [False, True, True, False, True]
149
+ >>> mask = safe_mask(data, condition)
150
+ >>> data[mask].toarray()
151
+ array([[2],
152
+ [3],
153
+ [5]])
154
+ """
155
+ mask = np.asarray(mask)
156
+ if np.issubdtype(mask.dtype, np.signedinteger):
157
+ return mask
158
+
159
+ if hasattr(X, "toarray"):
160
+ ind = np.arange(mask.shape[0])
161
+ mask = ind[mask]
162
+ return mask
163
+
164
+
165
+ def axis0_safe_slice(X, mask, len_mask):
166
+ """Return rows of X selected by mask; safer than using safe_mask alone.
167
+
168
+ This is safer than indexing with safe_mask alone since it returns an
169
+ empty array when a sparse matrix is sliced with a boolean mask
170
+ with all False, instead of raising an unhelpful error in older
171
+ versions of SciPy.
172
+
173
+ See: https://github.com/scipy/scipy/issues/5361
174
+
175
+ Also note that we can avoid doing the dot product by checking if
176
+ the len_mask is not zero in _huber_loss_and_gradient but this
177
+ is not going to be the bottleneck, since the number of outliers
178
+ and non_outliers are typically non-zero and it makes the code
179
+ tougher to follow.
180
+
181
+ Parameters
182
+ ----------
183
+ X : {array-like, sparse matrix}
184
+ Data on which to apply mask.
185
+
186
+ mask : ndarray
187
+ Mask to be used on X.
188
+
189
+ len_mask : int
190
+ The length of the mask.
191
+
192
+ Returns
193
+ -------
194
+ subset : ndarray
195
+ The rows of X selected by mask, or an empty array when len_mask is 0.
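+
+ Examples
+ --------
+ A minimal sketch with toy inputs:
+
+ >>> import numpy as np
+ >>> from sklearn.utils import axis0_safe_slice
+ >>> X = np.arange(6).reshape(3, 2)
+ >>> mask = np.array([True, False, True])
+ >>> axis0_safe_slice(X, mask, 2)
+ array([[0, 1],
+ [4, 5]])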
196
+ """
197
+ if len_mask != 0:
198
+ return X[safe_mask(X, mask), :]
199
+ return np.zeros(shape=(0, X.shape[1]))
200
+
201
+
202
+ def _array_indexing(array, key, key_dtype, axis):
203
+ """Index an array or scipy.sparse consistently across NumPy version."""
204
+ if issparse(array) and key_dtype == "bool":
205
+ key = np.asarray(key)
206
+ if isinstance(key, tuple):
207
+ key = list(key)
208
+ return array[key, ...] if axis == 0 else array[:, key]
209
+
210
+
211
+ def _pandas_indexing(X, key, key_dtype, axis):
212
+ """Index a pandas dataframe or a series."""
213
+ if _is_arraylike_not_scalar(key):
214
+ key = np.asarray(key)
215
+
216
+ if key_dtype == "int" and not (isinstance(key, slice) or np.isscalar(key)):
217
+ # using take() instead of iloc[] ensures the return value is a "proper"
218
+ # copy that will not raise SettingWithCopyWarning
219
+ return X.take(key, axis=axis)
220
+ else:
221
+ # check whether we should index with loc or iloc
222
+ indexer = X.iloc if key_dtype == "int" else X.loc
223
+ return indexer[:, key] if axis else indexer[key]
224
+
225
+
226
+ def _list_indexing(X, key, key_dtype):
227
+ """Index a Python list."""
228
+ if np.isscalar(key) or isinstance(key, slice):
229
+ # key is a slice or a scalar
230
+ return X[key]
231
+ if key_dtype == "bool":
232
+ # key is a boolean array-like
233
+ return list(compress(X, key))
234
+ # key is an integer array-like
235
+ return [X[idx] for idx in key]
236
+
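+ # For example: _list_indexing(['a', 'b', 'c'], [2, 0], key_dtype='int')
+ # returns ['c', 'a'], while _list_indexing(['a', 'b', 'c'],
+ # [True, False, True], key_dtype='bool') returns ['a', 'c'].
+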
237
+
238
+ def _polars_indexing(X, key, key_dtype, axis):
239
+ """Indexing X with polars interchange protocol."""
240
+ # Polars behavior is more consistent with lists
241
+ if isinstance(key, np.ndarray):
242
+ key = key.tolist()
243
+
244
+ if axis == 1:
245
+ return X[:, key]
246
+ else:
247
+ return X[key]
248
+
249
+
250
+ def _determine_key_type(key, accept_slice=True):
251
+ """Determine the data type of key.
252
+
253
+ Parameters
254
+ ----------
255
+ key : scalar, slice or array-like
256
+ The key from which we want to infer the data type.
257
+
258
+ accept_slice : bool, default=True
259
+ Whether or not to raise an error if the key is a slice.
260
+
261
+ Returns
262
+ -------
263
+ dtype : {'int', 'str', 'bool', None}
264
+ Returns the data type of key.
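+
+ Examples
+ --------
+ A minimal sketch with toy keys:
+
+ >>> import numpy as np
+ >>> from sklearn.utils import _determine_key_type
+ >>> _determine_key_type(0)
+ 'int'
+ >>> _determine_key_type(['a', 'b'])
+ 'str'
+ >>> _determine_key_type(np.array([True, False]))
+ 'bool'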
265
+ """
266
+ err_msg = (
267
+ "No valid specification of the columns. Only a scalar, list or "
268
+ "slice of all integers or all strings, or boolean mask is "
269
+ "allowed"
270
+ )
271
+
272
+ dtype_to_str = {int: "int", str: "str", bool: "bool", np.bool_: "bool"}
273
+ array_dtype_to_str = {
274
+ "i": "int",
275
+ "u": "int",
276
+ "b": "bool",
277
+ "O": "str",
278
+ "U": "str",
279
+ "S": "str",
280
+ }
281
+
282
+ if key is None:
283
+ return None
284
+ if isinstance(key, tuple(dtype_to_str.keys())):
285
+ try:
286
+ return dtype_to_str[type(key)]
287
+ except KeyError:
288
+ raise ValueError(err_msg)
289
+ if isinstance(key, slice):
290
+ if not accept_slice:
291
+ raise TypeError(
292
+ "Only array-like or scalar are supported. A Python slice was given."
293
+ )
294
+ if key.start is None and key.stop is None:
295
+ return None
296
+ key_start_type = _determine_key_type(key.start)
297
+ key_stop_type = _determine_key_type(key.stop)
298
+ if key_start_type is not None and key_stop_type is not None:
299
+ if key_start_type != key_stop_type:
300
+ raise ValueError(err_msg)
301
+ if key_start_type is not None:
302
+ return key_start_type
303
+ return key_stop_type
304
+ if isinstance(key, (list, tuple)):
305
+ unique_key = set(key)
306
+ key_type = {_determine_key_type(elt) for elt in unique_key}
307
+ if not key_type:
308
+ return None
309
+ if len(key_type) != 1:
310
+ raise ValueError(err_msg)
311
+ return key_type.pop()
312
+ if hasattr(key, "dtype"):
313
+ try:
314
+ return array_dtype_to_str[key.dtype.kind]
315
+ except KeyError:
316
+ raise ValueError(err_msg)
317
+ raise ValueError(err_msg)
318
+
319
+
320
+ def _safe_indexing(X, indices, *, axis=0):
321
+ """Return rows, items or columns of X using indices.
322
+
323
+ .. warning::
324
+
325
+ This utility is documented, but **private**. This means that
326
+ backward compatibility might be broken without any deprecation
327
+ cycle.
328
+
329
+ Parameters
330
+ ----------
331
+ X : array-like, sparse-matrix, list, pandas.DataFrame, pandas.Series
332
+ Data from which to sample rows, items or columns. `list` is only
333
+ supported when `axis=0`.
334
+ indices : bool, int, str, slice, array-like
335
+ - If `axis=0`, boolean and integer array-like, integer slice,
336
+ and scalar integer are supported.
337
+ - If `axis=1`:
338
+ - to select a single column, `indices` can be of `int` type for
339
+ all `X` types and `str` only for dataframe. The selected subset
340
+ will be 1D, unless `X` is a sparse matrix in which case it will
341
+ be 2D.
342
+ - to select multiple columns, `indices` can be one of the
343
+ following: `list`, `array`, `slice`. The type used in
344
+ these containers can be one of the following: `int`, `bool` and
345
+ `str`. However, `str` is only supported when `X` is a dataframe.
346
+ The selected subset will be 2D.
347
+ axis : int, default=0
348
+ The axis along which `X` will be subsampled. `axis=0` will select
349
+ rows while `axis=1` will select columns.
350
+
351
+ Returns
352
+ -------
353
+ subset
354
+ Subset of X on axis 0 or 1.
355
+
356
+ Notes
357
+ -----
358
+ CSR, CSC, and LIL sparse matrices are supported. COO sparse matrices are
359
+ not supported.
360
+
361
+ Examples
362
+ --------
363
+ >>> import numpy as np
364
+ >>> from sklearn.utils import _safe_indexing
365
+ >>> data = np.array([[1, 2], [3, 4], [5, 6]])
366
+ >>> _safe_indexing(data, 0, axis=0) # select the first row
367
+ array([1, 2])
368
+ >>> _safe_indexing(data, 0, axis=1) # select the first column
369
+ array([1, 3, 5])
370
+ """
371
+ if indices is None:
372
+ return X
373
+
374
+ if axis not in (0, 1):
375
+ raise ValueError(
376
+ "'axis' should be either 0 (to index rows) or 1 (to index "
377
+ "columns). Got {} instead.".format(axis)
378
+ )
379
+
380
+ indices_dtype = _determine_key_type(indices)
381
+
382
+ if axis == 0 and indices_dtype == "str":
383
+ raise ValueError("String indexing is not supported with 'axis=0'")
384
+
385
+ if axis == 1 and isinstance(X, list):
386
+ raise ValueError("axis=1 is not supported for lists")
387
+
388
+ if axis == 1 and hasattr(X, "ndim") and X.ndim != 2:
389
+ raise ValueError(
390
+ "'X' should be a 2D NumPy array, 2D sparse matrix or pandas "
391
+ "dataframe when indexing the columns (i.e. 'axis=1'). "
392
+ "Got {} instead with {} dimension(s).".format(type(X), X.ndim)
393
+ )
394
+
395
+ if (
396
+ axis == 1
397
+ and indices_dtype == "str"
398
+ and not (_is_pandas_df(X) or _use_interchange_protocol(X))
399
+ ):
400
+ raise ValueError(
401
+ "Specifying the columns using strings is only supported for dataframes."
402
+ )
403
+
404
+ if hasattr(X, "iloc"):
405
+ # TODO: we should probably use _is_pandas_df(X) instead but this would
406
+ # require updating some tests such as test_train_test_split_mock_pandas.
407
+ return _pandas_indexing(X, indices, indices_dtype, axis=axis)
408
+ elif _is_polars_df(X):
409
+ return _polars_indexing(X, indices, indices_dtype, axis=axis)
410
+ elif hasattr(X, "shape"):
411
+ return _array_indexing(X, indices, indices_dtype, axis=axis)
412
+ else:
413
+ return _list_indexing(X, indices, indices_dtype)
414
+
415
+
416
+ def _safe_assign(X, values, *, row_indexer=None, column_indexer=None):
417
+ """Safe assignment to a numpy array, sparse matrix, or pandas dataframe.
418
+
419
+ Parameters
420
+ ----------
421
+ X : {ndarray, sparse-matrix, dataframe}
422
+ Array to be modified. It is expected to be 2-dimensional.
423
+
424
+ values : ndarray
425
+ The values to be assigned to `X`.
426
+
427
+ row_indexer : array-like, dtype={int, bool}, default=None
428
+ A 1-dimensional array to select the rows of interest. If `None`, all
429
+ rows are selected.
430
+
431
+ column_indexer : array-like, dtype={int, bool}, default=None
432
+ A 1-dimensional array to select the columns of interest. If `None`, all
433
+ columns are selected.
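+
+ Examples
+ --------
+ A minimal sketch with a toy array:
+
+ >>> import numpy as np
+ >>> from sklearn.utils import _safe_assign
+ >>> X = np.zeros((3, 2))
+ >>> _safe_assign(X, np.array([[1.0, 2.0]]), row_indexer=[1])
+ >>> X
+ array([[0., 0.],
+ [1., 2.],
+ [0., 0.]])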
434
+ """
435
+ row_indexer = slice(None, None, None) if row_indexer is None else row_indexer
436
+ column_indexer = (
437
+ slice(None, None, None) if column_indexer is None else column_indexer
438
+ )
439
+
440
+ if hasattr(X, "iloc"): # pandas dataframe
441
+ with warnings.catch_warnings():
442
+ # pandas >= 1.5 raises a warning when using iloc to set values in a column
443
+ # that does not have the same type as the column being set. It happens
444
+ # for instance when setting a categorical column with a string.
445
+ # In the future the behavior won't change and the warning should disappear.
446
+ # TODO(1.3): check if the warning is still raised or remove the filter.
447
+ warnings.simplefilter("ignore", FutureWarning)
448
+ X.iloc[row_indexer, column_indexer] = values
449
+ else: # numpy array or sparse matrix
450
+ X[row_indexer, column_indexer] = values
451
+
452
+
453
+ def _get_column_indices_for_bool_or_int(key, n_columns):
454
+ # Convert key into list of positive integer indexes
455
+ try:
456
+ idx = _safe_indexing(np.arange(n_columns), key)
457
+ except IndexError as e:
458
+ raise ValueError(
459
+ f"all features must be in [0, {n_columns - 1}] or [-{n_columns}, 0]"
460
+ ) from e
461
+ return np.atleast_1d(idx).tolist()
462
+
463
+
464
+ def _get_column_indices(X, key):
465
+ """Get feature column indices for input data X and key.
466
+
467
+ For accepted values of `key`, see the docstring of
468
+ :func:`_safe_indexing`.
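+
+ Examples
+ --------
+ A minimal sketch with a toy array (dataframes work analogously):
+
+ >>> import numpy as np
+ >>> from sklearn.utils import _get_column_indices
+ >>> _get_column_indices(np.ones((2, 3)), [0, 2])
+ [0, 2]
+ >>> _get_column_indices(np.ones((2, 3)), np.array([True, False, True]))
+ [0, 2]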
469
+ """
470
+ key_dtype = _determine_key_type(key)
471
+ if _use_interchange_protocol(X):
472
+ return _get_column_indices_interchange(X.__dataframe__(), key, key_dtype)
473
+
474
+ n_columns = X.shape[1]
475
+ if isinstance(key, (list, tuple)) and not key:
476
+ # we get an empty list
477
+ return []
478
+ elif key_dtype in ("bool", "int"):
479
+ return _get_column_indices_for_bool_or_int(key, n_columns)
480
+ else:
481
+ try:
482
+ all_columns = X.columns
483
+ except AttributeError:
484
+ raise ValueError(
485
+ "Specifying the columns using strings is only supported for dataframes."
486
+ )
487
+ if isinstance(key, str):
488
+ columns = [key]
489
+ elif isinstance(key, slice):
490
+ start, stop = key.start, key.stop
491
+ if start is not None:
492
+ start = all_columns.get_loc(start)
493
+ if stop is not None:
494
+ # pandas indexing with strings is endpoint included
495
+ stop = all_columns.get_loc(stop) + 1
496
+ else:
497
+ stop = n_columns + 1
498
+ return list(islice(range(n_columns), start, stop))
499
+ else:
500
+ columns = list(key)
501
+
502
+ try:
503
+ column_indices = []
504
+ for col in columns:
505
+ col_idx = all_columns.get_loc(col)
506
+ if not isinstance(col_idx, numbers.Integral):
507
+ raise ValueError(
508
+ f"Selected columns, {columns}, are not unique in dataframe"
509
+ )
510
+ column_indices.append(col_idx)
511
+
512
+ except KeyError as e:
513
+ raise ValueError("A given column is not a column of the dataframe") from e
514
+
515
+ return column_indices
516
+
517
+
518
+ def _get_column_indices_interchange(X_interchange, key, key_dtype):
519
+ """Same as _get_column_indices but for X with __dataframe__ protocol."""
520
+
521
+ n_columns = X_interchange.num_columns()
522
+
523
+ if isinstance(key, (list, tuple)) and not key:
524
+ # we get an empty list
525
+ return []
526
+ elif key_dtype in ("bool", "int"):
527
+ return _get_column_indices_for_bool_or_int(key, n_columns)
528
+ else:
529
+ column_names = list(X_interchange.column_names())
530
+
531
+ if isinstance(key, slice):
532
+ if key.step not in [1, None]:
533
+ raise NotImplementedError("key.step must be 1 or None")
534
+ start, stop = key.start, key.stop
535
+ if start is not None:
536
+ start = column_names.index(start)
537
+
538
+ if stop is not None:
539
+ stop = column_names.index(stop) + 1
540
+ else:
541
+ stop = n_columns + 1
542
+ return list(islice(range(n_columns), start, stop))
543
+
544
+ selected_columns = [key] if np.isscalar(key) else key
545
+
546
+ try:
547
+ return [column_names.index(col) for col in selected_columns]
548
+ except ValueError as e:
549
+ raise ValueError("A given column is not a column of the dataframe") from e
550
+
551
+
552
+ @validate_params(
553
+ {
554
+ "replace": ["boolean"],
555
+ "n_samples": [Interval(numbers.Integral, 1, None, closed="left"), None],
556
+ "random_state": ["random_state"],
557
+ "stratify": ["array-like", None],
558
+ },
559
+ prefer_skip_nested_validation=True,
560
+ )
561
+ def resample(*arrays, replace=True, n_samples=None, random_state=None, stratify=None):
562
+ """Resample arrays or sparse matrices in a consistent way.
563
+
564
+ The default strategy implements one step of the bootstrapping
565
+ procedure.
566
+
567
+ Parameters
568
+ ----------
569
+ *arrays : sequence of array-like of shape (n_samples,) or \
570
+ (n_samples, n_outputs)
571
+ Indexable data-structures can be arrays, lists, dataframes or scipy
572
+ sparse matrices with consistent first dimension.
573
+
574
+ replace : bool, default=True
575
+ Implements resampling with replacement. If False, this will implement
576
+ (sliced) random permutations.
577
+
578
+ n_samples : int, default=None
579
+ Number of samples to generate. If left to None this is
580
+ automatically set to the first dimension of the arrays.
581
+ If replace is False it should not be larger than the length of
582
+ arrays.
583
+
584
+ random_state : int, RandomState instance or None, default=None
585
+ Determines random number generation for shuffling
586
+ the data.
587
+ Pass an int for reproducible results across multiple function calls.
588
+ See :term:`Glossary <random_state>`.
589
+
590
+ stratify : array-like of shape (n_samples,) or (n_samples, n_outputs), \
591
+ default=None
592
+ If not None, data is split in a stratified fashion, using this as
593
+ the class labels.
594
+
595
+ Returns
596
+ -------
597
+ resampled_arrays : sequence of array-like of shape (n_samples,) or \
598
+ (n_samples, n_outputs)
599
+ Sequence of resampled copies of the collections. The original arrays
600
+ are not impacted.
601
+
602
+ See Also
603
+ --------
604
+ shuffle : Shuffle arrays or sparse matrices in a consistent way.
605
+
606
+ Examples
607
+ --------
608
+ It is possible to mix sparse and dense arrays in the same run::
609
+
610
+ >>> import numpy as np
611
+ >>> X = np.array([[1., 0.], [2., 1.], [0., 0.]])
612
+ >>> y = np.array([0, 1, 2])
613
+
614
+ >>> from scipy.sparse import coo_matrix
615
+ >>> X_sparse = coo_matrix(X)
616
+
617
+ >>> from sklearn.utils import resample
618
+ >>> X, X_sparse, y = resample(X, X_sparse, y, random_state=0)
619
+ >>> X
620
+ array([[1., 0.],
621
+ [2., 1.],
622
+ [1., 0.]])
623
+
624
+ >>> X_sparse
625
+ <3x2 sparse matrix of type '<... 'numpy.float64'>'
626
+ with 4 stored elements in Compressed Sparse Row format>
627
+
628
+ >>> X_sparse.toarray()
629
+ array([[1., 0.],
630
+ [2., 1.],
631
+ [1., 0.]])
632
+
633
+ >>> y
634
+ array([0, 1, 0])
635
+
636
+ >>> resample(y, n_samples=2, random_state=0)
637
+ array([0, 1])
638
+
639
+ Example using stratification::
640
+
641
+ >>> y = [0, 0, 1, 1, 1, 1, 1, 1, 1]
642
+ >>> resample(y, n_samples=5, replace=False, stratify=y,
643
+ ... random_state=0)
644
+ [1, 1, 1, 0, 1]
645
+ """
646
+ max_n_samples = n_samples
647
+ random_state = check_random_state(random_state)
648
+
649
+ if len(arrays) == 0:
650
+ return None
651
+
652
+ first = arrays[0]
653
+ n_samples = first.shape[0] if hasattr(first, "shape") else len(first)
654
+
655
+ if max_n_samples is None:
656
+ max_n_samples = n_samples
657
+ elif (max_n_samples > n_samples) and (not replace):
658
+ raise ValueError(
659
+ "Cannot sample %d out of arrays with dim %d when replace is False"
660
+ % (max_n_samples, n_samples)
661
+ )
662
+
663
+ check_consistent_length(*arrays)
664
+
665
+ if stratify is None:
666
+ if replace:
667
+ indices = random_state.randint(0, n_samples, size=(max_n_samples,))
668
+ else:
669
+ indices = np.arange(n_samples)
670
+ random_state.shuffle(indices)
671
+ indices = indices[:max_n_samples]
672
+ else:
673
+ # Code adapted from StratifiedShuffleSplit()
674
+ y = check_array(stratify, ensure_2d=False, dtype=None)
675
+ if y.ndim == 2:
676
+ # for multi-label y, map each distinct row to a string repr
677
+ # using join because str(row) uses an ellipsis if len(row) > 1000
678
+ y = np.array([" ".join(row.astype("str")) for row in y])
679
+
680
+ classes, y_indices = np.unique(y, return_inverse=True)
681
+ n_classes = classes.shape[0]
682
+
683
+ class_counts = np.bincount(y_indices)
684
+
685
+ # Find the sorted list of instances for each class:
686
+ # (np.unique above performs a sort, so code is O(n logn) already)
687
+ class_indices = np.split(
688
+ np.argsort(y_indices, kind="mergesort"), np.cumsum(class_counts)[:-1]
689
+ )
690
+
691
+ n_i = _approximate_mode(class_counts, max_n_samples, random_state)
692
+
693
+ indices = []
694
+
695
+ for i in range(n_classes):
696
+ indices_i = random_state.choice(class_indices[i], n_i[i], replace=replace)
697
+ indices.extend(indices_i)
698
+
699
+ indices = random_state.permutation(indices)
700
+
701
+ # convert sparse matrices to CSR for row-based indexing
702
+ arrays = [a.tocsr() if issparse(a) else a for a in arrays]
703
+ resampled_arrays = [_safe_indexing(a, indices) for a in arrays]
704
+ if len(resampled_arrays) == 1:
705
+ # syntactic sugar for the unit argument case
706
+ return resampled_arrays[0]
707
+ else:
708
+ return resampled_arrays
709
+
710
+
711
+ def shuffle(*arrays, random_state=None, n_samples=None):
712
+ """Shuffle arrays or sparse matrices in a consistent way.
713
+
714
+ This is a convenience alias to ``resample(*arrays, replace=False)`` to do
715
+ random permutations of the collections.
716
+
717
+ Parameters
718
+ ----------
719
+ *arrays : sequence of indexable data-structures
720
+ Indexable data-structures can be arrays, lists, dataframes or scipy
721
+ sparse matrices with consistent first dimension.
722
+
723
+ random_state : int, RandomState instance or None, default=None
724
+ Determines random number generation for shuffling
725
+ the data.
726
+ Pass an int for reproducible results across multiple function calls.
727
+ See :term:`Glossary <random_state>`.
728
+
729
+ n_samples : int, default=None
730
+ Number of samples to generate. If left to None this is
731
+ automatically set to the first dimension of the arrays. It should
732
+ not be larger than the length of arrays.
733
+
734
+ Returns
735
+ -------
736
+ shuffled_arrays : sequence of indexable data-structures
737
+ Sequence of shuffled copies of the collections. The original arrays
738
+ are not impacted.
739
+
740
+ See Also
741
+ --------
742
+ resample : Resample arrays or sparse matrices in a consistent way.
743
+
744
+ Examples
745
+ --------
746
+ It is possible to mix sparse and dense arrays in the same run::
747
+
748
+ >>> import numpy as np
749
+ >>> X = np.array([[1., 0.], [2., 1.], [0., 0.]])
750
+ >>> y = np.array([0, 1, 2])
751
+
752
+ >>> from scipy.sparse import coo_matrix
753
+ >>> X_sparse = coo_matrix(X)
754
+
755
+ >>> from sklearn.utils import shuffle
756
+ >>> X, X_sparse, y = shuffle(X, X_sparse, y, random_state=0)
757
+ >>> X
758
+ array([[0., 0.],
759
+ [2., 1.],
760
+ [1., 0.]])
761
+
762
+ >>> X_sparse
763
+ <3x2 sparse matrix of type '<... 'numpy.float64'>'
764
+ with 3 stored elements in Compressed Sparse Row format>
765
+
766
+ >>> X_sparse.toarray()
767
+ array([[0., 0.],
768
+ [2., 1.],
769
+ [1., 0.]])
770
+
771
+ >>> y
772
+ array([2, 1, 0])
773
+
774
+ >>> shuffle(y, n_samples=2, random_state=0)
775
+ array([0, 1])
776
+ """
777
+ return resample(
778
+ *arrays, replace=False, n_samples=n_samples, random_state=random_state
779
+ )
780
+
781
+
782
+ def safe_sqr(X, *, copy=True):
783
+ """Element wise squaring of array-likes and sparse matrices.
784
+
785
+ Parameters
786
+ ----------
787
+ X : {array-like, ndarray, sparse matrix}
788
+
789
+ copy : bool, default=True
790
+ Whether to create a copy of X and operate on it or to perform
791
+ inplace computation (default behaviour).
792
+
793
+ Returns
794
+ -------
795
+ X ** 2 : element wise square
796
+ Return the element-wise square of the input.
797
+
798
+ Examples
799
+ --------
800
+ >>> from sklearn.utils import safe_sqr
801
+ >>> safe_sqr([1, 2, 3])
802
+ array([1, 4, 9])
803
+ """
804
+ X = check_array(X, accept_sparse=["csr", "csc", "coo"], ensure_2d=False)
805
+ if issparse(X):
806
+ if copy:
807
+ X = X.copy()
808
+ X.data **= 2
809
+ else:
810
+ if copy:
811
+ X = X**2
812
+ else:
813
+ X **= 2
814
+ return X
815
+
816
+
817
+ def _chunk_generator(gen, chunksize):
818
+ """Chunk the generator ``gen`` into lists of length ``chunksize``. The last
819
+ chunk may have a length less than ``chunksize``."""
820
+ while True:
821
+ chunk = list(islice(gen, chunksize))
822
+ if chunk:
823
+ yield chunk
824
+ else:
825
+ return
826
+
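+ # For example: list(_chunk_generator(iter(range(5)), 2))
+ # yields the chunks [[0, 1], [2, 3], [4]].
+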
827
+
828
+ @validate_params(
829
+ {
830
+ "n": [Interval(numbers.Integral, 1, None, closed="left")],
831
+ "batch_size": [Interval(numbers.Integral, 1, None, closed="left")],
832
+ "min_batch_size": [Interval(numbers.Integral, 0, None, closed="left")],
833
+ },
834
+ prefer_skip_nested_validation=True,
835
+ )
836
+ def gen_batches(n, batch_size, *, min_batch_size=0):
837
+ """Generator to create slices containing `batch_size` elements from 0 to `n`.
838
+
839
+ The last slice may contain less than `batch_size` elements, when
840
+ `batch_size` does not divide `n`.
841
+
842
+ Parameters
843
+ ----------
844
+ n : int
845
+ Size of the sequence.
846
+ batch_size : int
847
+ Number of elements in each batch.
848
+ min_batch_size : int, default=0
849
+ Minimum number of elements in each batch.
850
+
851
+ Yields
852
+ ------
853
+ slice of `batch_size` elements
854
+
855
+ See Also
856
+ --------
857
+ gen_even_slices: Generator to create n_packs slices going up to n.
858
+
859
+ Examples
860
+ --------
861
+ >>> from sklearn.utils import gen_batches
862
+ >>> list(gen_batches(7, 3))
863
+ [slice(0, 3, None), slice(3, 6, None), slice(6, 7, None)]
864
+ >>> list(gen_batches(6, 3))
865
+ [slice(0, 3, None), slice(3, 6, None)]
866
+ >>> list(gen_batches(2, 3))
867
+ [slice(0, 2, None)]
868
+ >>> list(gen_batches(7, 3, min_batch_size=0))
869
+ [slice(0, 3, None), slice(3, 6, None), slice(6, 7, None)]
870
+ >>> list(gen_batches(7, 3, min_batch_size=2))
871
+ [slice(0, 3, None), slice(3, 7, None)]
872
+ """
873
+ start = 0
874
+ for _ in range(int(n // batch_size)):
875
+ end = start + batch_size
876
+ if end + min_batch_size > n:
877
+ continue
878
+ yield slice(start, end)
879
+ start = end
880
+ if start < n:
881
+ yield slice(start, n)
882
+
883
+
884
+ @validate_params(
885
+ {
886
+ "n": [Interval(Integral, 1, None, closed="left")],
887
+ "n_packs": [Interval(Integral, 1, None, closed="left")],
888
+ "n_samples": [Interval(Integral, 1, None, closed="left"), None],
889
+ },
890
+ prefer_skip_nested_validation=True,
891
+ )
892
+ def gen_even_slices(n, n_packs, *, n_samples=None):
893
+ """Generator to create `n_packs` evenly spaced slices going up to `n`.
894
+
895
+ If `n_packs` does not divide `n`, the first `n % n_packs` slices
896
+ contain one element more than the remaining slices.
897
+
898
+ Parameters
899
+ ----------
900
+ n : int
901
+ Size of the sequence.
902
+ n_packs : int
903
+ Number of slices to generate.
904
+ n_samples : int, default=None
905
+ Number of samples. Pass `n_samples` when the slices are to be used for
906
+ sparse matrix indexing; slicing off the end raises an exception, while
907
+ it works for NumPy arrays.
908
+
909
+ Yields
910
+ ------
911
+ `slice` representing a set of indices from 0 to n.
912
+
913
+ See Also
914
+ --------
915
+ gen_batches: Generator to create slices containing batch_size elements
916
+ from 0 to n.
917
+
918
+ Examples
919
+ --------
920
+ >>> from sklearn.utils import gen_even_slices
921
+ >>> list(gen_even_slices(10, 1))
922
+ [slice(0, 10, None)]
923
+ >>> list(gen_even_slices(10, 10))
924
+ [slice(0, 1, None), slice(1, 2, None), ..., slice(9, 10, None)]
925
+ >>> list(gen_even_slices(10, 5))
926
+ [slice(0, 2, None), slice(2, 4, None), ..., slice(8, 10, None)]
927
+ >>> list(gen_even_slices(10, 3))
928
+ [slice(0, 4, None), slice(4, 7, None), slice(7, 10, None)]
929
+ """
930
+ start = 0
931
+ for pack_num in range(n_packs):
932
+ this_n = n // n_packs
933
+ if pack_num < n % n_packs:
934
+ this_n += 1
935
+ if this_n > 0:
936
+ end = start + this_n
937
+ if n_samples is not None:
938
+ end = min(n_samples, end)
939
+ yield slice(start, end, None)
940
+ start = end
941
+
942
+
943
+ def tosequence(x):
944
+ """Cast iterable x to a Sequence, avoiding a copy if possible.
945
+
946
+ Parameters
947
+ ----------
948
+ x : iterable
949
+ The iterable to be converted.
950
+
951
+ Returns
952
+ -------
953
+ x : Sequence
954
+ If `x` is a NumPy array, it is returned as an `ndarray`. If `x`
955
+ is a `Sequence`, `x` is returned as-is. If `x` is of any other
956
+ type, `x` is returned cast as a list.
957
+ """
958
+ if isinstance(x, np.ndarray):
959
+ return np.asarray(x)
960
+ elif isinstance(x, Sequence):
961
+ return x
962
+ else:
963
+ return list(x)
964
+
965
+
966
+ def _to_object_array(sequence):
967
+ """Convert sequence to a 1-D NumPy array of object dtype.
968
+
969
+ The numpy.array constructor has a similar use, but its output
970
+ is ambiguous: it can be a 1-D NumPy array of object dtype if
971
+ the input is a ragged array, but if the input is a list of
972
+ equal-length arrays, then the output is a 2-D numpy.array.
973
+ _to_object_array resolves this ambiguity by guaranteeing that
974
+ the output is a 1-D NumPy array of objects for any input.
975
+
976
+ Parameters
977
+ ----------
978
+ sequence : array-like of shape (n_elements,)
979
+ The sequence to be converted.
980
+
981
+ Returns
982
+ -------
983
+ out : ndarray of shape (n_elements,), dtype=object
984
+ The converted sequence into a 1-D NumPy array of object dtype.
985
+
986
+ Examples
987
+ --------
988
+ >>> import numpy as np
989
+ >>> from sklearn.utils import _to_object_array
990
+ >>> _to_object_array([np.array([0]), np.array([1])])
991
+ array([array([0]), array([1])], dtype=object)
992
+ >>> _to_object_array([np.array([0]), np.array([1, 2])])
993
+ array([array([0]), array([1, 2])], dtype=object)
996
+ """
997
+ out = np.empty(len(sequence), dtype=object)
998
+ out[:] = sequence
999
+ return out
1000
+
1001
+
1002
+ def indices_to_mask(indices, mask_length):
1003
+ """Convert list of indices to boolean mask.
1004
+
1005
+ Parameters
1006
+ ----------
1007
+ indices : list-like
1008
+ List of integers treated as indices.
1009
+ mask_length : int
1010
+ Length of boolean mask to be generated.
1011
+ This parameter must be greater than max(indices).
1012
+
1013
+ Returns
1014
+ -------
1015
+ mask : 1d boolean nd-array
1016
+ Boolean array that is True where indices are present, else False.
1017
+
1018
+ Examples
1019
+ --------
1020
+ >>> from sklearn.utils import indices_to_mask
1021
+ >>> indices = [1, 2, 3, 4]
1022
+ >>> indices_to_mask(indices, 5)
1023
+ array([False, True, True, True, True])
1024
+ """
1025
+ if mask_length <= np.max(indices):
1026
+ raise ValueError("mask_length must be greater than max(indices)")
1027
+
1028
+ mask = np.zeros(mask_length, dtype=bool)
1029
+ mask[indices] = True
1030
+
1031
+ return mask
1032
+
1033
+
1034
+ def _message_with_time(source, message, time):
1035
+ """Create one line message for logging purposes.
1036
+
1037
+ Parameters
1038
+ ----------
1039
+ source : str
1040
+ String indicating the source or the reference of the message.
1041
+
1042
+ message : str
1043
+ Short message.
1044
+
1045
+ time : int
1046
+ Time in seconds.
1047
+ """
1048
+ start_message = "[%s] " % source
1049
+
1050
+ # adapted from joblib.logger.short_format_time without the Windows -.1s
1051
+ # adjustment
1052
+ if time > 60:
1053
+ time_str = "%4.1fmin" % (time / 60)
1054
+ else:
1055
+ time_str = " %5.1fs" % time
1056
+ end_message = " %s, total=%s" % (message, time_str)
1057
+ dots_len = 70 - len(start_message) - len(end_message)
1058
+ return "%s%s%s" % (start_message, dots_len * ".", end_message)
1059
+
1060
+
1061
+ @contextmanager
1062
+ def _print_elapsed_time(source, message=None):
1063
+ """Log elapsed time to stdout when the context is exited.
1064
+
1065
+ Parameters
1066
+ ----------
1067
+ source : str
1068
+ String indicating the source or the reference of the message.
1069
+
1070
+ message : str, default=None
1071
+ Short message. If None, nothing will be printed.
1072
+
1073
+ Returns
1074
+ -------
1075
+ context_manager
1076
+ Prints elapsed time upon exit if verbose.
1077
+ """
1078
+ if message is None:
1079
+ yield
1080
+ else:
1081
+ start = timeit.default_timer()
1082
+ yield
1083
+ print(_message_with_time(source, message, timeit.default_timer() - start))
1084
+
1085
+
1086
+ def get_chunk_n_rows(row_bytes, *, max_n_rows=None, working_memory=None):
1087
+ """Calculate how many rows can be processed within `working_memory`.
1088
+
1089
+ Parameters
1090
+ ----------
1091
+ row_bytes : int
1092
+ The expected number of bytes of memory that will be consumed
1093
+ during the processing of each row.
1094
+ max_n_rows : int, default=None
1095
+ The maximum return value.
1096
+ working_memory : int or float, default=None
1097
+ The number of rows to fit inside this number of MiB will be
1098
+ returned. When None (default), the value of
1099
+ ``sklearn.get_config()['working_memory']`` is used.
1100
+
1101
+ Returns
1102
+ -------
1103
+ int
1104
+ The number of rows which can be processed within `working_memory`.
1105
+
1106
+ Warns
1107
+ -----
1108
+ Issues a UserWarning if `row_bytes` exceeds `working_memory` MiB.
1109
+ """
1110
+
1111
+ if working_memory is None:
1112
+ working_memory = get_config()["working_memory"]
1113
+
1114
+ chunk_n_rows = int(working_memory * (2**20) // row_bytes)
1115
+ if max_n_rows is not None:
1116
+ chunk_n_rows = min(chunk_n_rows, max_n_rows)
1117
+ if chunk_n_rows < 1:
1118
+ warnings.warn(
1119
+ "Could not adhere to working_memory config. "
1120
+ "Currently %.0fMiB, %.0fMiB required."
1121
+ % (working_memory, np.ceil(row_bytes * 2**-20))
1122
+ )
1123
+ chunk_n_rows = 1
1124
+ return chunk_n_rows
1125
+
1126
+
1127
+ def _is_pandas_na(x):
1128
+ """Test if x is pandas.NA.
1129
+
1130
+ We intentionally do not use this function to return `True` for `pd.NA` in
1131
+ `is_scalar_nan`, because estimators that support `pd.NA` are the exception
1132
+ rather than the rule at the moment. When `pd.NA` is more universally
1133
+ supported, we may reconsider this decision.
1134
+
1135
+ Parameters
1136
+ ----------
1137
+ x : any type
1138
+
1139
+ Returns
1140
+ -------
1141
+ boolean
1142
+ """
1143
+ with suppress(ImportError):
1144
+ from pandas import NA
1145
+
1146
+ return x is NA
1147
+
1148
+ return False
1149
+
1150
+
1151
+ def is_scalar_nan(x):
1152
+ """Test if x is NaN.
1153
+
1154
+ This function is meant to overcome the issue that np.isnan does not allow
1155
+ non-numerical types as input, and that np.nan is not float('nan').
1156
+
1157
+ Parameters
1158
+ ----------
1159
+ x : any type
1160
+ Any scalar value.
1161
+
1162
+ Returns
1163
+ -------
1164
+ bool
1165
+ Returns true if x is NaN, and false otherwise.
1166
+
1167
+ Examples
1168
+ --------
1169
+ >>> import numpy as np
1170
+ >>> from sklearn.utils import is_scalar_nan
1171
+ >>> is_scalar_nan(np.nan)
1172
+ True
1173
+ >>> is_scalar_nan(float("nan"))
1174
+ True
1175
+ >>> is_scalar_nan(None)
1176
+ False
1177
+ >>> is_scalar_nan("")
1178
+ False
1179
+ >>> is_scalar_nan([np.nan])
1180
+ False
1181
+ """
1182
+ return (
1183
+ not isinstance(x, numbers.Integral)
1184
+ and isinstance(x, numbers.Real)
1185
+ and math.isnan(x)
1186
+ )
1187
+
1188
+
1189
+ def _approximate_mode(class_counts, n_draws, rng):
1190
+ """Computes approximate mode of multivariate hypergeometric.
1191
+
1192
+ This is an approximation to the mode of the multivariate
1193
+ hypergeometric given by class_counts and n_draws.
1194
+ It shouldn't be off by more than one.
1195
+
1196
+ It is the most likely outcome of drawing n_draws many
1197
+ samples from the population given by class_counts.
1198
+
1199
+ Parameters
1200
+ ----------
1201
+ class_counts : ndarray of int
1202
+ Population per class.
1203
+ n_draws : int
1204
+ Number of draws (samples to draw) from the overall population.
1205
+ rng : random state
1206
+ Used to break ties.
1207
+
1208
+ Returns
1209
+ -------
1210
+ sampled_classes : ndarray of int
1211
+ Number of samples drawn from each class.
1212
+ np.sum(sampled_classes) == n_draws
1213
+
1214
+ Examples
1215
+ --------
1216
+ >>> import numpy as np
1217
+ >>> from sklearn.utils import _approximate_mode
1218
+ >>> _approximate_mode(class_counts=np.array([4, 2]), n_draws=3, rng=0)
1219
+ array([2, 1])
1220
+ >>> _approximate_mode(class_counts=np.array([5, 2]), n_draws=4, rng=0)
1221
+ array([3, 1])
1222
+ >>> _approximate_mode(class_counts=np.array([2, 2, 2, 1]),
1223
+ ... n_draws=2, rng=0)
1224
+ array([0, 1, 1, 0])
1225
+ >>> _approximate_mode(class_counts=np.array([2, 2, 2, 1]),
1226
+ ... n_draws=2, rng=42)
1227
+ array([1, 1, 0, 0])
1228
+ """
1229
+ rng = check_random_state(rng)
1230
+ # this computes a bad approximation to the mode of the
1231
+ # multivariate hypergeometric given by class_counts and n_draws
1232
+ continuous = class_counts / class_counts.sum() * n_draws
1233
+ # floored means we don't overshoot n_samples, but probably undershoot
1234
+ floored = np.floor(continuous)
1235
+ # we add samples according to how much "left over" probability
1236
+ # they had, until we arrive at n_samples
1237
+ need_to_add = int(n_draws - floored.sum())
1238
+ if need_to_add > 0:
1239
+ remainder = continuous - floored
1240
+ values = np.sort(np.unique(remainder))[::-1]
1241
+ # add according to remainder, but break ties
1242
+ # randomly to avoid biases
1243
+ for value in values:
1244
+ (inds,) = np.where(remainder == value)
1245
+ # if we need_to_add less than what's in inds
1246
+ # we draw randomly from them.
1247
+ # if we need to add more, we add them all and
1248
+ # go to the next value
1249
+ add_now = min(len(inds), need_to_add)
1250
+ inds = rng.choice(inds, size=add_now, replace=False)
1251
+ floored[inds] += 1
1252
+ need_to_add -= add_now
1253
+ if need_to_add == 0:
1254
+ break
1255
+ return floored.astype(int)
1256
+
1257
+
1258
+ def check_matplotlib_support(caller_name):
1259
+ """Raise ImportError with detailed error message if mpl is not installed.
1260
+
1261
+ Plot utilities like any of the Display's plotting functions should lazily import
1262
+ matplotlib and call this helper before any computation.
1263
+
1264
+ Parameters
1265
+ ----------
1266
+ caller_name : str
1267
+ The name of the caller that requires matplotlib.
1268
+ """
1269
+ try:
1270
+ import matplotlib # noqa
1271
+ except ImportError as e:
1272
+ raise ImportError(
1273
+ "{} requires matplotlib. You can install matplotlib with "
1274
+ "`pip install matplotlib`".format(caller_name)
1275
+ ) from e
1276
+
1277
+
1278
+ def check_pandas_support(caller_name):
1279
+ """Raise ImportError with detailed error message if pandas is not installed.
1280
+
1281
+ Utilities like :func:`fetch_openml` should lazily import
1282
+ pandas and call this helper before any computation.
1283
+
1284
+ Parameters
1285
+ ----------
1286
+ caller_name : str
1287
+ The name of the caller that requires pandas.
1288
+
1289
+ Returns
1290
+ -------
1291
+ pandas
1292
+ The pandas package.
1293
+ """
1294
+ try:
1295
+ import pandas # noqa
1296
+
1297
+ return pandas
1298
+ except ImportError as e:
1299
+ raise ImportError("{} requires pandas.".format(caller_name)) from e
env-llmeval/lib/python3.10/site-packages/sklearn/utils/_arpack.py ADDED
@@ -0,0 +1,30 @@
1
+ from .validation import check_random_state
2
+
3
+
4
+ def _init_arpack_v0(size, random_state):
5
+ """Initialize the starting vector for iteration in ARPACK functions.
6
+
7
+ Initialize an ndarray with values sampled from the uniform distribution on
8
+ [-1, 1]. This initialization model has been chosen to be consistent with
9
+ the ARPACK one, as another initialization can lead to convergence issues.
10
+
11
+ Parameters
12
+ ----------
13
+ size : int
14
+ The size of the eigenvalue vector to be initialized.
15
+
16
+ random_state : int, RandomState instance or None, default=None
17
+ The seed of the pseudo random number generator used to generate a
18
+ uniform distribution. If int, random_state is the seed used by the
19
+ random number generator; If RandomState instance, random_state is the
20
+ random number generator; If None, the random number generator is the
21
+ RandomState instance used by `np.random`.
22
+
23
+ Returns
24
+ -------
25
+ v0 : ndarray of shape (size,)
26
+ The initialized vector.
27
+ """
28
+ random_state = check_random_state(random_state)
29
+ v0 = random_state.uniform(-1, 1, size)
30
+ return v0
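A short usage sketch: the returned vector is meant to seed the ``v0`` argument of SciPy's ARPACK solvers so that results are reproducible (the ``eigsh`` call and the symmetric matrix below are illustrative, not part of this module):

>>> import numpy as np
>>> from scipy.sparse.linalg import eigsh
>>> from sklearn.utils._arpack import _init_arpack_v0
>>> rng = np.random.RandomState(42)
>>> A = rng.rand(5, 5)
>>> A = A @ A.T  # make the matrix symmetric
>>> v0 = _init_arpack_v0(size=5, random_state=0)
>>> eigenvalues, _ = eigsh(A, k=1, v0=v0)  # deterministic given v0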
env-llmeval/lib/python3.10/site-packages/sklearn/utils/_array_api.py ADDED
@@ -0,0 +1,575 @@
1
+ """Tools to support array_api."""
2
+ import itertools
3
+ import math
4
+ from functools import wraps
5
+
6
+ import numpy
7
+ import scipy.special as special
8
+
9
+ from .._config import get_config
10
+ from .fixes import parse_version
11
+
12
+
13
+ def yield_namespace_device_dtype_combinations():
14
+ """Yield supported namespace, device, dtype tuples for testing.
15
+
16
+ Use this to test that an estimator works with all combinations.
17
+
18
+ Returns
19
+ -------
20
+ array_namespace : str
21
+ The name of the Array API namespace.
22
+
23
+ device : str
24
+ The name of the device on which to allocate the arrays. Can be None to
25
+ indicate that the default value should be used.
26
+
27
+ dtype_name : str
28
+ The name of the data type to use for arrays. Can be None to indicate
29
+ that the default value should be used.
30
+ """
31
+ for array_namespace in [
32
+ # The following is used to test the array_api_compat wrapper when
33
+ # array_api_dispatch is enabled: in particular, the arrays used in the
34
+ # tests are regular numpy arrays without any "device" attribute.
35
+ "numpy",
36
+ # Stricter NumPy-based Array API implementation. The
37
+ # numpy.array_api.Array instances always have a dummy "device" attribute.
38
+ "numpy.array_api",
39
+ "cupy",
40
+ "cupy.array_api",
41
+ "torch",
42
+ ]:
43
+ if array_namespace == "torch":
44
+ for device, dtype in itertools.product(
45
+ ("cpu", "cuda"), ("float64", "float32")
46
+ ):
47
+ yield array_namespace, device, dtype
48
+ yield array_namespace, "mps", "float32"
49
+ else:
50
+ yield array_namespace, None, None
51
+
52
+
53
+ def _check_array_api_dispatch(array_api_dispatch):
54
+ """Check that array_api_compat is installed and NumPy version is compatible.
55
+
56
+ array_api_compat follows NEP29, which has a higher minimum NumPy version than
57
+ scikit-learn.
58
+ """
59
+ if array_api_dispatch:
60
+ try:
61
+ import array_api_compat # noqa
62
+ except ImportError:
63
+ raise ImportError(
64
+ "array_api_compat is required to dispatch arrays using the API"
65
+ " specification"
66
+ )
67
+
68
+ numpy_version = parse_version(numpy.__version__)
69
+ min_numpy_version = "1.21"
70
+ if numpy_version < parse_version(min_numpy_version):
71
+ raise ImportError(
72
+ f"NumPy must be {min_numpy_version} or newer to dispatch array using"
73
+ " the API specification"
74
+ )
75
+
76
+
77
+ def device(x):
78
+ """Hardware device the array data resides on.
79
+
80
+ Parameters
81
+ ----------
82
+ x : array
83
+ Array instance from NumPy or an array API compatible library.
84
+
85
+ Returns
86
+ -------
87
+ out : device
88
+ `device` object (see the "Device Support" section of the array API spec).
89
+ """
90
+ if isinstance(x, (numpy.ndarray, numpy.generic)):
91
+ return "cpu"
92
+ return x.device
93
+
94
+
95
+ def size(x):
96
+ """Return the total number of elements of x.
97
+
98
+ Parameters
99
+ ----------
100
+ x : array
101
+ Array instance from NumPy or an array API compatible library.
102
+
103
+ Returns
104
+ -------
105
+ out : int
106
+ Total number of elements.
107
+ """
108
+ return math.prod(x.shape)
109
+
110
+
111
+ def _is_numpy_namespace(xp):
112
+ """Return True if xp is backed by NumPy."""
113
+ return xp.__name__ in {"numpy", "array_api_compat.numpy", "numpy.array_api"}
114
+
115
+
116
+ def _union1d(a, b, xp):
117
+ if _is_numpy_namespace(xp):
118
+ return xp.asarray(numpy.union1d(a, b))
119
+ assert a.ndim == b.ndim == 1
120
+ return xp.unique_values(xp.concat([xp.unique_values(a), xp.unique_values(b)]))
121
+
122
+
123
+ def isdtype(dtype, kind, *, xp):
124
+ """Returns a boolean indicating whether a provided dtype is of type "kind".
125
+
126
+ Included in v2022.12 of the Array API spec.
127
+ https://data-apis.org/array-api/latest/API_specification/generated/array_api.isdtype.html
128
+ """
129
+ if isinstance(kind, tuple):
130
+ return any(_isdtype_single(dtype, k, xp=xp) for k in kind)
131
+ else:
132
+ return _isdtype_single(dtype, kind, xp=xp)
133
+
134
+
135
+ def _isdtype_single(dtype, kind, *, xp):
136
+ if isinstance(kind, str):
137
+ if kind == "bool":
138
+ return dtype == xp.bool
139
+ elif kind == "signed integer":
140
+ return dtype in {xp.int8, xp.int16, xp.int32, xp.int64}
141
+ elif kind == "unsigned integer":
142
+ return dtype in {xp.uint8, xp.uint16, xp.uint32, xp.uint64}
143
+ elif kind == "integral":
144
+ return any(
145
+ _isdtype_single(dtype, k, xp=xp)
146
+ for k in ("signed integer", "unsigned integer")
147
+ )
148
+ elif kind == "real floating":
149
+ return dtype in supported_float_dtypes(xp)
150
+ elif kind == "complex floating":
151
+ # Some namespaces do not have complex dtypes, such as cupy.array_api
152
+ # and numpy.array_api
153
+ complex_dtypes = set()
154
+ if hasattr(xp, "complex64"):
155
+ complex_dtypes.add(xp.complex64)
156
+ if hasattr(xp, "complex128"):
157
+ complex_dtypes.add(xp.complex128)
158
+ return dtype in complex_dtypes
159
+ elif kind == "numeric":
160
+ return any(
161
+ _isdtype_single(dtype, k, xp=xp)
162
+ for k in ("integral", "real floating", "complex floating")
163
+ )
164
+ else:
165
+ raise ValueError(f"Unrecognized data type kind: {kind!r}")
166
+ else:
167
+ return dtype == kind
168
+
169
+
170
+ def supported_float_dtypes(xp):
171
+ """Supported floating point types for the namespace
172
+
173
+ Note: float16 is not officially part of the Array API spec at the
174
+ time of writing but scikit-learn estimators and functions can choose
175
+ to accept it when xp.float16 is defined.
176
+
177
+ https://data-apis.org/array-api/latest/API_specification/data_types.html
178
+ """
179
+ if hasattr(xp, "float16"):
180
+ return (xp.float64, xp.float32, xp.float16)
181
+ else:
182
+ return (xp.float64, xp.float32)
183
+
184
+
185
+ class _ArrayAPIWrapper:
186
+ """sklearn specific Array API compatibility wrapper
187
+
188
+ This wrapper makes it possible for scikit-learn maintainers to
189
+ deal with discrepancies between different implementations of the
190
+ Python Array API standard and its evolution over time.
191
+
192
+ The Python Array API standard specification:
193
+ https://data-apis.org/array-api/latest/
194
+
195
+ Documentation of the NumPy implementation:
196
+ https://numpy.org/neps/nep-0047-array-api-standard.html
197
+ """
198
+
199
+ def __init__(self, array_namespace):
200
+ self._namespace = array_namespace
201
+
202
+ def __getattr__(self, name):
203
+ return getattr(self._namespace, name)
204
+
205
+ def __eq__(self, other):
206
+ return self._namespace == other._namespace
207
+
208
+ def isdtype(self, dtype, kind):
209
+ return isdtype(dtype, kind, xp=self._namespace)
210
+
211
+
212
+ def _check_device_cpu(device): # noqa
213
+ if device not in {"cpu", None}:
214
+ raise ValueError(f"Unsupported device for NumPy: {device!r}")
215
+
216
+
217
+ def _accept_device_cpu(func):
218
+ @wraps(func)
219
+ def wrapped_func(*args, **kwargs):
220
+ _check_device_cpu(kwargs.pop("device", None))
221
+ return func(*args, **kwargs)
222
+
223
+ return wrapped_func
224
+
225
+
226
+ class _NumPyAPIWrapper:
227
+ """Array API compat wrapper for any numpy version
228
+
229
+ NumPy < 1.22 does not expose the numpy.array_api namespace. This
230
+ wrapper makes it possible to write code that uses the standard
231
+ Array API while working with any version of NumPy supported by
232
+ scikit-learn.
233
+
234
+ See the `get_namespace()` public function for more details.
235
+ """
236
+
237
+ # Creation functions in spec:
238
+ # https://data-apis.org/array-api/latest/API_specification/creation_functions.html
239
+ _CREATION_FUNCS = {
240
+ "arange",
241
+ "empty",
242
+ "empty_like",
243
+ "eye",
244
+ "full",
245
+ "full_like",
246
+ "linspace",
247
+ "ones",
248
+ "ones_like",
249
+ "zeros",
250
+ "zeros_like",
251
+ }
252
+ # Data types in spec
253
+ # https://data-apis.org/array-api/latest/API_specification/data_types.html
254
+ _DTYPES = {
255
+ "int8",
256
+ "int16",
257
+ "int32",
258
+ "int64",
259
+ "uint8",
260
+ "uint16",
261
+ "uint32",
262
+ "uint64",
263
+ # XXX: float16 is not part of the Array API spec but exposed by
264
+ # some namespaces.
265
+ "float16",
266
+ "float32",
267
+ "float64",
268
+ "complex64",
269
+ "complex128",
270
+ }
271
+
272
+ def __getattr__(self, name):
273
+ attr = getattr(numpy, name)
274
+
275
+ # Support device kwargs and make sure they are on the CPU
276
+ if name in self._CREATION_FUNCS:
277
+ return _accept_device_cpu(attr)
278
+
279
+ # Convert to dtype objects
280
+ if name in self._DTYPES:
281
+ return numpy.dtype(attr)
282
+ return attr
283
+
284
+ @property
285
+ def bool(self):
286
+ return numpy.bool_
287
+
288
+ def astype(self, x, dtype, *, copy=True, casting="unsafe"):
289
+ # astype is not defined in the top level NumPy namespace
290
+ return x.astype(dtype, copy=copy, casting=casting)
291
+
292
+ def asarray(self, x, *, dtype=None, device=None, copy=None): # noqa
293
+ _check_device_cpu(device)
294
+ # Support copy in NumPy namespace
295
+ if copy is True:
296
+ return numpy.array(x, copy=True, dtype=dtype)
297
+ else:
298
+ return numpy.asarray(x, dtype=dtype)
299
+
300
+ def unique_inverse(self, x):
301
+ return numpy.unique(x, return_inverse=True)
302
+
303
+ def unique_counts(self, x):
304
+ return numpy.unique(x, return_counts=True)
305
+
306
+ def unique_values(self, x):
307
+ return numpy.unique(x)
308
+
309
+ def concat(self, arrays, *, axis=None):
310
+ return numpy.concatenate(arrays, axis=axis)
311
+
312
+ def reshape(self, x, shape, *, copy=None):
313
+ """Gives a new shape to an array without changing its data.
314
+
315
+ The Array API specification requires shape to be a tuple.
316
+ https://data-apis.org/array-api/latest/API_specification/generated/array_api.reshape.html
317
+ """
318
+ if not isinstance(shape, tuple):
319
+ raise TypeError(
320
+ f"shape must be a tuple, got {shape!r} of type {type(shape)}"
321
+ )
322
+
323
+ if copy is True:
324
+ x = x.copy()
325
+ return numpy.reshape(x, shape)
326
+
327
+ def isdtype(self, dtype, kind):
328
+ return isdtype(dtype, kind, xp=self)
329
+
330
+
331
+ _NUMPY_API_WRAPPER_INSTANCE = _NumPyAPIWrapper()
332
+
333
+
334
+ def get_namespace(*arrays):
335
+ """Get namespace of arrays.
336
+
337
+ Introspect `arrays` arguments and return their common Array API
338
+ compatible namespace object, if any. NumPy 1.22 and later can
339
+ construct such containers using the `numpy.array_api` namespace
340
+ for instance.
341
+
342
+ See: https://numpy.org/neps/nep-0047-array-api-standard.html
343
+
344
+ If `arrays` are regular numpy arrays, an instance of the
345
+ `_NumPyAPIWrapper` compatibility wrapper is returned instead.
346
+
347
+ Namespace support is not enabled by default. To enable it,
348
+ call:
349
+
350
+ sklearn.set_config(array_api_dispatch=True)
351
+
352
+ or:
353
+
354
+ with sklearn.config_context(array_api_dispatch=True):
355
+ # your code here
356
+
357
+ Otherwise an instance of the `_NumPyAPIWrapper`
358
+ compatibility wrapper is always returned, irrespective of
359
+ whether the arrays implement the `__array_namespace__`
360
+ protocol or not.
361
+
362
+ Parameters
363
+ ----------
364
+ *arrays : array objects
365
+ Array objects.
366
+
367
+ Returns
368
+ -------
369
+ namespace : module
370
+ Namespace shared by array objects. If any of the `arrays` are not arrays,
371
+ the namespace defaults to NumPy.
372
+
373
+ is_array_api_compliant : bool
374
+ True if the arrays are containers that implement the Array API spec.
375
+ Always False when array_api_dispatch=False.
376
+ """
377
+ array_api_dispatch = get_config()["array_api_dispatch"]
378
+ if not array_api_dispatch:
379
+ return _NUMPY_API_WRAPPER_INSTANCE, False
380
+
381
+ _check_array_api_dispatch(array_api_dispatch)
382
+
383
+ # array-api-compat is a required dependency of scikit-learn only when
384
+ # configuring `array_api_dispatch=True`. Its import should therefore be
385
+ # protected by _check_array_api_dispatch to display an informative error
386
+ # message in case it is missing.
387
+ import array_api_compat
388
+
389
+ namespace, is_array_api_compliant = array_api_compat.get_namespace(*arrays), True
390
+
391
+ # These namespaces need additional wrapping to smooth out small differences
392
+ # between implementations
393
+ if namespace.__name__ in {"numpy.array_api", "cupy.array_api"}:
394
+ namespace = _ArrayAPIWrapper(namespace)
395
+
396
+ return namespace, is_array_api_compliant
397
+
398
+
399
+ def _expit(X):
400
+ xp, _ = get_namespace(X)
401
+ if _is_numpy_namespace(xp):
402
+ return xp.asarray(special.expit(numpy.asarray(X)))
403
+
404
+ return 1.0 / (1.0 + xp.exp(-X))
405
+
406
+
407
+ def _add_to_diagonal(array, value, xp):
408
+ # Workaround for the lack of support for xp.reshape(a, shape, copy=False) in
409
+ # numpy.array_api: https://github.com/numpy/numpy/issues/23410
410
+ value = xp.asarray(value, dtype=array.dtype)
411
+ if _is_numpy_namespace(xp):
412
+ array_np = numpy.asarray(array)
413
+ array_np.flat[:: array.shape[0] + 1] += value
414
+ return xp.asarray(array_np)
415
+ elif value.ndim == 1:
416
+ for i in range(array.shape[0]):
417
+ array[i, i] += value[i]
418
+ else:
419
+ # scalar value
420
+ for i in range(array.shape[0]):
421
+ array[i, i] += value
422
+
423
+
424
+ def _weighted_sum(sample_score, sample_weight, normalize=False, xp=None):
425
+ # XXX: this function accepts Array API input but returns a Python scalar
426
+ # float. The call to float() is convenient because it removes the need to
427
+ # move back results from device to host memory (e.g. calling `.cpu()` on a
428
+ # torch tensor). However, this might interact in unexpected ways (break?)
429
+ # with lazy Array API implementations. See:
430
+ # https://github.com/data-apis/array-api/issues/642
431
+ if xp is None:
432
+ xp, _ = get_namespace(sample_score)
433
+ if normalize and _is_numpy_namespace(xp):
434
+ sample_score_np = numpy.asarray(sample_score)
435
+ if sample_weight is not None:
436
+ sample_weight_np = numpy.asarray(sample_weight)
437
+ else:
438
+ sample_weight_np = None
439
+ return float(numpy.average(sample_score_np, weights=sample_weight_np))
440
+
441
+ if not xp.isdtype(sample_score.dtype, "real floating"):
442
+ # We move to cpu device ahead of time since certain devices may not support
443
+ # float64, but we want the same precision for all devices and namespaces.
444
+ sample_score = xp.astype(xp.asarray(sample_score, device="cpu"), xp.float64)
445
+
446
+ if sample_weight is not None:
447
+ sample_weight = xp.asarray(
448
+ sample_weight, dtype=sample_score.dtype, device=device(sample_score)
449
+ )
450
+ if not xp.isdtype(sample_weight.dtype, "real floating"):
451
+ sample_weight = xp.astype(sample_weight, xp.float64)
452
+
453
+ if normalize:
454
+ if sample_weight is not None:
455
+ scale = xp.sum(sample_weight)
456
+ else:
457
+ scale = sample_score.shape[0]
458
+ if scale != 0:
459
+ sample_score = sample_score / scale
460
+
461
+ if sample_weight is not None:
462
+ return float(sample_score @ sample_weight)
463
+ else:
464
+ return float(xp.sum(sample_score))
465
+
466
+
467
+ def _nanmin(X, axis=None):
468
+ # TODO: refactor once nan-aware reductions are standardized:
469
+ # https://github.com/data-apis/array-api/issues/621
470
+ xp, _ = get_namespace(X)
471
+ if _is_numpy_namespace(xp):
472
+ return xp.asarray(numpy.nanmin(X, axis=axis))
473
+
474
+ else:
475
+ mask = xp.isnan(X)
476
+ X = xp.min(xp.where(mask, xp.asarray(+xp.inf, device=device(X)), X), axis=axis)
477
+ # Replace Infs from all NaN slices with NaN again
478
+ mask = xp.all(mask, axis=axis)
479
+ if xp.any(mask):
480
+ X = xp.where(mask, xp.asarray(xp.nan), X)
481
+ return X
482
+
483
+
484
+ def _nanmax(X, axis=None):
485
+ # TODO: refactor once nan-aware reductions are standardized:
486
+ # https://github.com/data-apis/array-api/issues/621
487
+ xp, _ = get_namespace(X)
488
+ if _is_numpy_namespace(xp):
489
+ return xp.asarray(numpy.nanmax(X, axis=axis))
490
+
491
+ else:
492
+ mask = xp.isnan(X)
493
+ X = xp.max(xp.where(mask, xp.asarray(-xp.inf, device=device(X)), X), axis=axis)
494
+ # Replace Infs from all NaN slices with NaN again
495
+ mask = xp.all(mask, axis=axis)
496
+ if xp.any(mask):
497
+ X = xp.where(mask, xp.asarray(xp.nan), X)
498
+ return X
499
+
500
+
501
+ def _asarray_with_order(array, dtype=None, order=None, copy=None, *, xp=None):
502
+ """Helper to support the order kwarg only for NumPy-backed arrays
503
+
504
+ Memory layout parameter `order` is not exposed in the Array API standard,
505
+ however some input validation code in scikit-learn needs to work both
506
+ for classes and functions that will leverage Array API only operations
507
+ and for code that inherently relies on NumPy backed data containers with
508
+ specific memory layout constraints (e.g. our own Cython code). The
509
+ purpose of this helper is to make it possible to share code for data
510
+ container validation without memory copies for both downstream use cases:
511
+ the `order` parameter is only enforced if the input array implementation
512
+ is NumPy based, otherwise `order` is just silently ignored.
513
+ """
514
+ if xp is None:
515
+ xp, _ = get_namespace(array)
516
+ if _is_numpy_namespace(xp):
517
+ # Use NumPy API to support order
518
+ if copy is True:
519
+ array = numpy.array(array, order=order, dtype=dtype)
520
+ else:
521
+ array = numpy.asarray(array, order=order, dtype=dtype)
522
+
523
+ # At this point array is a NumPy ndarray. We convert it to an array
524
+ # container that is consistent with the input's namespace.
525
+ return xp.asarray(array)
526
+ else:
527
+ return xp.asarray(array, dtype=dtype, copy=copy)
528
+
529
+
530
+ def _convert_to_numpy(array, xp):
531
+ """Convert X into a NumPy ndarray on the CPU."""
532
+ xp_name = xp.__name__
533
+
534
+ if xp_name in {"array_api_compat.torch", "torch"}:
535
+ return array.cpu().numpy()
536
+ elif xp_name == "cupy.array_api":
537
+ return array._array.get()
538
+ elif xp_name in {"array_api_compat.cupy", "cupy"}: # pragma: nocover
539
+ return array.get()
540
+
541
+ return numpy.asarray(array)
542
+
543
+
544
+ def _estimator_with_converted_arrays(estimator, converter):
545
+ """Create new estimator which converting all attributes that are arrays.
546
+
547
+ The converter is called on all NumPy arrays and arrays that support the
548
+ `DLPack interface <https://dmlc.github.io/dlpack/latest/>`__.
549
+
550
+ Parameters
551
+ ----------
552
+ estimator : Estimator
553
+ Estimator to convert
554
+
555
+ converter : callable
556
+ Callable that takes an array attribute and returns the converted array.
557
+
558
+ Returns
559
+ -------
560
+ new_estimator : Estimator
561
+ Convert estimator
562
+ """
563
+ from sklearn.base import clone
564
+
565
+ new_estimator = clone(estimator)
566
+ for key, attribute in vars(estimator).items():
567
+ if hasattr(attribute, "__dlpack__") or isinstance(attribute, numpy.ndarray):
568
+ attribute = converter(attribute)
569
+ setattr(new_estimator, key, attribute)
570
+ return new_estimator
571
+
572
+
573
+ def _atol_for_type(dtype):
574
+ """Return the absolute tolerance for a given dtype."""
575
+ return numpy.finfo(dtype).eps * 100
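A minimal sketch of how ``get_namespace`` reacts to the global configuration flag; the outputs follow directly from the code above, and the dispatching branch additionally requires ``array_api_compat`` to be installed:

>>> import numpy as np
>>> from sklearn import config_context
>>> from sklearn.utils._array_api import get_namespace
>>> x = np.asarray([1.0, 2.0])
>>> xp, is_compliant = get_namespace(x)
>>> is_compliant  # dispatch disabled: the _NumPyAPIWrapper fallback is used
False
>>> with config_context(array_api_dispatch=True):  # needs array_api_compat
...     _, is_compliant = get_namespace(x)
>>> is_compliant
True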
env-llmeval/lib/python3.10/site-packages/sklearn/utils/_available_if.py ADDED
@@ -0,0 +1,93 @@
1
+ from functools import update_wrapper, wraps
2
+ from types import MethodType
3
+
4
+
5
+ class _AvailableIfDescriptor:
6
+ """Implements a conditional property using the descriptor protocol.
7
+
8
+ Using this class to create a decorator will raise an ``AttributeError``
9
+ if check(self) returns a falsy value. Note that if check raises an error,
10
+ this will also result in hasattr returning False.
11
+
12
+ See https://docs.python.org/3/howto/descriptor.html for an explanation of
13
+ descriptors.
14
+ """
15
+
16
+ def __init__(self, fn, check, attribute_name):
17
+ self.fn = fn
18
+ self.check = check
19
+ self.attribute_name = attribute_name
20
+
21
+ # update the docstring of the descriptor
22
+ update_wrapper(self, fn)
23
+
24
+ def _check(self, obj, owner):
25
+ attr_err_msg = (
26
+ f"This {repr(owner.__name__)} has no attribute {repr(self.attribute_name)}"
27
+ )
28
+ try:
29
+ check_result = self.check(obj)
30
+ except Exception as e:
31
+ raise AttributeError(attr_err_msg) from e
32
+
33
+ if not check_result:
34
+ raise AttributeError(attr_err_msg)
35
+
36
+ def __get__(self, obj, owner=None):
37
+ if obj is not None:
38
+ # delegate only on instances, not the classes.
39
+ # this is to allow access to the docstrings.
40
+ self._check(obj, owner=owner)
41
+ out = MethodType(self.fn, obj)
42
+
43
+ else:
44
+ # This makes it possible to use the decorated method as an unbound method,
45
+ # for instance when monkeypatching.
46
+ @wraps(self.fn)
47
+ def out(*args, **kwargs):
48
+ self._check(args[0], owner=owner)
49
+ return self.fn(*args, **kwargs)
50
+
51
+ return out
52
+
53
+
54
+ def available_if(check):
55
+ """An attribute that is available only if check returns a truthy value.
56
+
57
+ Parameters
58
+ ----------
59
+ check : callable
60
+ When passed the object with the decorated method, this should return
61
+ a truthy value if the attribute is available, and either return False
62
+ or raise an AttributeError if not available.
63
+
64
+ Returns
65
+ -------
66
+ callable
67
+ Callable makes the decorated method available if `check` returns
68
+ a truthy value, otherwise the decorated method is unavailable.
69
+
70
+ Examples
71
+ --------
72
+ >>> from sklearn.utils.metaestimators import available_if
73
+ >>> class HelloIfEven:
74
+ ... def __init__(self, x):
75
+ ... self.x = x
76
+ ...
77
+ ... def _x_is_even(self):
78
+ ... return self.x % 2 == 0
79
+ ...
80
+ ... @available_if(_x_is_even)
81
+ ... def say_hello(self):
82
+ ... print("Hello")
83
+ ...
84
+ >>> obj = HelloIfEven(1)
85
+ >>> hasattr(obj, "say_hello")
86
+ False
87
+ >>> obj.x = 2
88
+ >>> hasattr(obj, "say_hello")
89
+ True
90
+ >>> obj.say_hello()
91
+ Hello
92
+ """
93
+ return lambda fn: _AvailableIfDescriptor(fn, check, attribute_name=fn.__name__)
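The ``obj is None`` branch of ``__get__`` also makes the decorated method usable as an unbound method; a short sketch continuing the ``HelloIfEven`` doctest above:

>>> HelloIfEven.say_hello(HelloIfEven(2))  # unbound access still runs the check
Hello
>>> HelloIfEven.say_hello(HelloIfEven(1))
Traceback (most recent call last):
    ...
AttributeError: This 'HelloIfEven' has no attribute 'say_hello'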
env-llmeval/lib/python3.10/site-packages/sklearn/utils/_bunch.py ADDED
@@ -0,0 +1,67 @@
1
+ import warnings
2
+
3
+
4
+ class Bunch(dict):
5
+ """Container object exposing keys as attributes.
6
+
7
+ Bunch objects are sometimes used as an output for functions and methods.
8
+ They extend dictionaries by enabling values to be accessed by key,
9
+ `bunch["value_key"]`, or by an attribute, `bunch.value_key`.
10
+
11
+ Examples
12
+ --------
13
+ >>> from sklearn.utils import Bunch
14
+ >>> b = Bunch(a=1, b=2)
15
+ >>> b['b']
16
+ 2
17
+ >>> b.b
18
+ 2
19
+ >>> b.a = 3
20
+ >>> b['a']
21
+ 3
22
+ >>> b.c = 6
23
+ >>> b['c']
24
+ 6
25
+ """
26
+
27
+ def __init__(self, **kwargs):
28
+ super().__init__(kwargs)
29
+
30
+ # Map from deprecated key to warning message
31
+ self.__dict__["_deprecated_key_to_warnings"] = {}
32
+
33
+ def __getitem__(self, key):
34
+ if key in self.__dict__.get("_deprecated_key_to_warnings", {}):
35
+ warnings.warn(
36
+ self._deprecated_key_to_warnings[key],
37
+ FutureWarning,
38
+ )
39
+ return super().__getitem__(key)
40
+
41
+ def _set_deprecated(self, value, *, new_key, deprecated_key, warning_message):
42
+ """Set key in dictionary to be deprecated with its warning message."""
43
+ self.__dict__["_deprecated_key_to_warnings"][deprecated_key] = warning_message
44
+ self[new_key] = self[deprecated_key] = value
45
+
46
+ def __setattr__(self, key, value):
47
+ self[key] = value
48
+
49
+ def __dir__(self):
50
+ return self.keys()
51
+
52
+ def __getattr__(self, key):
53
+ try:
54
+ return self[key]
55
+ except KeyError:
56
+ raise AttributeError(key)
57
+
58
+ def __setstate__(self, state):
59
+ # Bunch pickles generated with scikit-learn 0.16.* have a non-empty
60
+ # __dict__. This causes surprising behaviour when loading these
61
+ # pickles with scikit-learn 0.17: reading bunch.key uses __dict__,
62
+ # but assigning to bunch.key uses __setattr__ and
63
+ # only changes bunch['key']. More details can be found at:
64
+ # https://github.com/scikit-learn/scikit-learn/issues/6196.
65
+ # Overriding __setstate__ to be a noop has the effect of
66
+ # ignoring the pickled __dict__
67
+ pass
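A sketch of the private deprecation mechanism defined above (the key names here are purely illustrative):

>>> import warnings
>>> from sklearn.utils import Bunch
>>> b = Bunch()
>>> b._set_deprecated(10, new_key="values", deprecated_key="data",
...                   warning_message="'data' was renamed to 'values'")
>>> b["values"]  # new key: no warning
10
>>> with warnings.catch_warnings(record=True) as caught:
...     warnings.simplefilter("always")
...     _ = b["data"]  # deprecated key: emits a FutureWarning
>>> caught[0].category
<class 'FutureWarning'>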
env-llmeval/lib/python3.10/site-packages/sklearn/utils/_cython_blas.pxd ADDED
@@ -0,0 +1,41 @@
1
+ from cython cimport floating
2
+
3
+
4
+ cpdef enum BLAS_Order:
5
+ RowMajor # C contiguous
6
+ ColMajor # Fortran contiguous
7
+
8
+
9
+ cpdef enum BLAS_Trans:
10
+ NoTrans = 110 # corresponds to 'n'
11
+ Trans = 116 # corresponds to 't'
12
+
13
+
14
+ # BLAS Level 1 ################################################################
15
+ cdef floating _dot(int, const floating*, int, const floating*, int) noexcept nogil
16
+
17
+ cdef floating _asum(int, const floating*, int) noexcept nogil
18
+
19
+ cdef void _axpy(int, floating, const floating*, int, floating*, int) noexcept nogil
20
+
21
+ cdef floating _nrm2(int, const floating*, int) noexcept nogil
22
+
23
+ cdef void _copy(int, const floating*, int, const floating*, int) noexcept nogil
24
+
25
+ cdef void _scal(int, floating, const floating*, int) noexcept nogil
26
+
27
+ cdef void _rotg(floating*, floating*, floating*, floating*) noexcept nogil
28
+
29
+ cdef void _rot(int, floating*, int, floating*, int, floating, floating) noexcept nogil
30
+
31
+ # BLAS Level 2 ################################################################
32
+ cdef void _gemv(BLAS_Order, BLAS_Trans, int, int, floating, const floating*, int,
33
+ const floating*, int, floating, floating*, int) noexcept nogil
34
+
35
+ cdef void _ger(BLAS_Order, int, int, floating, const floating*, int, const floating*,
36
+ int, floating*, int) noexcept nogil
37
+
38
+ # BLAS Level 3 ###############################################################
39
+ cdef void _gemm(BLAS_Order, BLAS_Trans, BLAS_Trans, int, int, int, floating,
40
+ const floating*, int, const floating*, int, floating, floating*,
41
+ int) noexcept nogil
env-llmeval/lib/python3.10/site-packages/sklearn/utils/_estimator_html_repr.css ADDED
@@ -0,0 +1,404 @@
1
+ #$id {
2
+ /* Definition of color scheme common for light and dark mode */
3
+ --sklearn-color-text: black;
4
+ --sklearn-color-line: gray;
5
+ /* Definition of color scheme for unfitted estimators */
6
+ --sklearn-color-unfitted-level-0: #fff5e6;
7
+ --sklearn-color-unfitted-level-1: #f6e4d2;
8
+ --sklearn-color-unfitted-level-2: #ffe0b3;
9
+ --sklearn-color-unfitted-level-3: chocolate;
10
+ /* Definition of color scheme for fitted estimators */
11
+ --sklearn-color-fitted-level-0: #f0f8ff;
12
+ --sklearn-color-fitted-level-1: #d4ebff;
13
+ --sklearn-color-fitted-level-2: #b3dbfd;
14
+ --sklearn-color-fitted-level-3: cornflowerblue;
15
+
16
+ /* Specific color for light theme */
17
+ --sklearn-color-text-on-default-background: var(--sg-text-color, var(--theme-code-foreground, var(--jp-content-font-color1, black)));
18
+ --sklearn-color-background: var(--sg-background-color, var(--theme-background, var(--jp-layout-color0, white)));
19
+ --sklearn-color-border-box: var(--sg-text-color, var(--theme-code-foreground, var(--jp-content-font-color1, black)));
20
+ --sklearn-color-icon: #696969;
21
+
22
+ @media (prefers-color-scheme: dark) {
23
+ /* Redefinition of color scheme for dark theme */
24
+ --sklearn-color-text-on-default-background: var(--sg-text-color, var(--theme-code-foreground, var(--jp-content-font-color1, white)));
25
+ --sklearn-color-background: var(--sg-background-color, var(--theme-background, var(--jp-layout-color0, #111)));
26
+ --sklearn-color-border-box: var(--sg-text-color, var(--theme-code-foreground, var(--jp-content-font-color1, white)));
27
+ --sklearn-color-icon: #878787;
28
+ }
29
+ }
30
+
31
+ #$id {
32
+ color: var(--sklearn-color-text);
33
+ }
34
+
35
+ #$id pre {
36
+ padding: 0;
37
+ }
38
+
39
+ #$id input.sk-hidden--visually {
40
+ border: 0;
41
+ clip: rect(1px 1px 1px 1px);
42
+ clip: rect(1px, 1px, 1px, 1px);
43
+ height: 1px;
44
+ margin: -1px;
45
+ overflow: hidden;
46
+ padding: 0;
47
+ position: absolute;
48
+ width: 1px;
49
+ }
50
+
51
+ #$id div.sk-dashed-wrapped {
52
+ border: 1px dashed var(--sklearn-color-line);
53
+ margin: 0 0.4em 0.5em 0.4em;
54
+ box-sizing: border-box;
55
+ padding-bottom: 0.4em;
56
+ background-color: var(--sklearn-color-background);
57
+ }
58
+
59
+ #$id div.sk-container {
60
+ /* jupyter's `normalize.less` sets `[hidden] { display: none; }`
61
+ but bootstrap.min.css set `[hidden] { display: none !important; }`
62
+ so we also need the `!important` here to be able to override the
63
+ default hidden behavior on the sphinx rendered scikit-learn.org.
64
+ See: https://github.com/scikit-learn/scikit-learn/issues/21755 */
65
+ display: inline-block !important;
66
+ position: relative;
67
+ }
68
+
69
+ #$id div.sk-text-repr-fallback {
70
+ display: none;
71
+ }
72
+
73
+ div.sk-parallel-item,
74
+ div.sk-serial,
75
+ div.sk-item {
76
+ /* draw centered vertical line to link estimators */
77
+ background-image: linear-gradient(var(--sklearn-color-text-on-default-background), var(--sklearn-color-text-on-default-background));
78
+ background-size: 2px 100%;
79
+ background-repeat: no-repeat;
80
+ background-position: center center;
81
+ }
82
+
83
+ /* Parallel-specific style estimator block */
84
+
85
+ #$id div.sk-parallel-item::after {
86
+ content: "";
87
+ width: 100%;
88
+ border-bottom: 2px solid var(--sklearn-color-text-on-default-background);
89
+ flex-grow: 1;
90
+ }
91
+
92
+ #$id div.sk-parallel {
93
+ display: flex;
94
+ align-items: stretch;
95
+ justify-content: center;
96
+ background-color: var(--sklearn-color-background);
97
+ position: relative;
98
+ }
99
+
100
+ #$id div.sk-parallel-item {
101
+ display: flex;
102
+ flex-direction: column;
103
+ }
104
+
105
+ #$id div.sk-parallel-item:first-child::after {
106
+ align-self: flex-end;
107
+ width: 50%;
108
+ }
109
+
110
+ #$id div.sk-parallel-item:last-child::after {
111
+ align-self: flex-start;
112
+ width: 50%;
113
+ }
114
+
115
+ #$id div.sk-parallel-item:only-child::after {
116
+ width: 0;
117
+ }
118
+
119
+ /* Serial-specific style estimator block */
120
+
121
+ #$id div.sk-serial {
122
+ display: flex;
123
+ flex-direction: column;
124
+ align-items: center;
125
+ background-color: var(--sklearn-color-background);
126
+ padding-right: 1em;
127
+ padding-left: 1em;
128
+ }
129
+
130
+
131
+ /* Toggleable style: style used for estimator/Pipeline/ColumnTransformer box that is
132
+ clickable and can be expanded/collapsed.
133
+ - Pipeline and ColumnTransformer use this feature and define the default style
134
+ - Estimators will overwrite some part of the style using the `sk-estimator` class
135
+ */
136
+
137
+ /* Pipeline and ColumnTransformer style (default) */
138
+
139
+ #$id div.sk-toggleable {
140
+ /* Default theme specific background. It is overwritten whether we have a
141
+ specific estimator or a Pipeline/ColumnTransformer */
142
+ background-color: var(--sklearn-color-background);
143
+ }
144
+
145
+ /* Toggleable label */
146
+ #$id label.sk-toggleable__label {
147
+ cursor: pointer;
148
+ display: block;
149
+ width: 100%;
150
+ margin-bottom: 0;
151
+ padding: 0.5em;
152
+ box-sizing: border-box;
153
+ text-align: center;
154
+ }
155
+
156
+ #$id label.sk-toggleable__label-arrow:before {
157
+ /* Arrow on the left of the label */
158
+ content: "▸";
159
+ float: left;
160
+ margin-right: 0.25em;
161
+ color: var(--sklearn-color-icon);
162
+ }
163
+
164
+ #$id label.sk-toggleable__label-arrow:hover:before {
165
+ color: var(--sklearn-color-text);
166
+ }
167
+
168
+ /* Toggleable content - dropdown */
169
+
170
+ #$id div.sk-toggleable__content {
171
+ max-height: 0;
172
+ max-width: 0;
173
+ overflow: hidden;
174
+ text-align: left;
175
+ /* unfitted */
176
+ background-color: var(--sklearn-color-unfitted-level-0);
177
+ }
178
+
179
+ #$id div.sk-toggleable__content.fitted {
180
+ /* fitted */
181
+ background-color: var(--sklearn-color-fitted-level-0);
182
+ }
183
+
184
+ #$id div.sk-toggleable__content pre {
185
+ margin: 0.2em;
186
+ border-radius: 0.25em;
187
+ color: var(--sklearn-color-text);
188
+ /* unfitted */
189
+ background-color: var(--sklearn-color-unfitted-level-0);
190
+ }
191
+
192
+ #$id div.sk-toggleable__content.fitted pre {
193
+ /* unfitted */
194
+ background-color: var(--sklearn-color-fitted-level-0);
195
+ }
196
+
197
+ #$id input.sk-toggleable__control:checked~div.sk-toggleable__content {
198
+ /* Expand drop-down */
199
+ max-height: 200px;
200
+ max-width: 100%;
201
+ overflow: auto;
202
+ }
203
+
204
+ #$id input.sk-toggleable__control:checked~label.sk-toggleable__label-arrow:before {
205
+ content: "▾";
206
+ }
207
+
208
+ /* Pipeline/ColumnTransformer-specific style */
209
+
210
+ #$id div.sk-label input.sk-toggleable__control:checked~label.sk-toggleable__label {
211
+ color: var(--sklearn-color-text);
212
+ background-color: var(--sklearn-color-unfitted-level-2);
213
+ }
214
+
215
+ #$id div.sk-label.fitted input.sk-toggleable__control:checked~label.sk-toggleable__label {
216
+ background-color: var(--sklearn-color-fitted-level-2);
217
+ }
218
+
219
+ /* Estimator-specific style */
220
+
221
+ /* Colorize estimator box */
222
+ #$id div.sk-estimator input.sk-toggleable__control:checked~label.sk-toggleable__label {
223
+ /* unfitted */
224
+ background-color: var(--sklearn-color-unfitted-level-2);
225
+ }
226
+
227
+ #$id div.sk-estimator.fitted input.sk-toggleable__control:checked~label.sk-toggleable__label {
228
+ /* fitted */
229
+ background-color: var(--sklearn-color-fitted-level-2);
230
+ }
231
+
232
+ #$id div.sk-label label.sk-toggleable__label,
233
+ #$id div.sk-label label {
234
+ /* The background is the default theme color */
235
+ color: var(--sklearn-color-text-on-default-background);
236
+ }
237
+
238
+ /* On hover, darken the color of the background */
239
+ #$id div.sk-label:hover label.sk-toggleable__label {
240
+ color: var(--sklearn-color-text);
241
+ background-color: var(--sklearn-color-unfitted-level-2);
242
+ }
243
+
244
+ /* Label box, darken color on hover, fitted */
245
+ #$id div.sk-label.fitted:hover label.sk-toggleable__label.fitted {
246
+ color: var(--sklearn-color-text);
247
+ background-color: var(--sklearn-color-fitted-level-2);
248
+ }
249
+
250
+ /* Estimator label */
251
+
252
+ #$id div.sk-label label {
253
+ font-family: monospace;
254
+ font-weight: bold;
255
+ display: inline-block;
256
+ line-height: 1.2em;
257
+ }
258
+
259
+ #$id div.sk-label-container {
260
+ text-align: center;
261
+ }
262
+
263
+ /* Estimator-specific */
264
+ #$id div.sk-estimator {
265
+ font-family: monospace;
266
+ border: 1px dotted var(--sklearn-color-border-box);
267
+ border-radius: 0.25em;
268
+ box-sizing: border-box;
269
+ margin-bottom: 0.5em;
270
+ /* unfitted */
271
+ background-color: var(--sklearn-color-unfitted-level-0);
272
+ }
273
+
274
+ #$id div.sk-estimator.fitted {
275
+ /* fitted */
276
+ background-color: var(--sklearn-color-fitted-level-0);
277
+ }
278
+
279
+ /* on hover */
280
+ #$id div.sk-estimator:hover {
281
+ /* unfitted */
282
+ background-color: var(--sklearn-color-unfitted-level-2);
283
+ }
284
+
285
+ #$id div.sk-estimator.fitted:hover {
286
+ /* fitted */
287
+ background-color: var(--sklearn-color-fitted-level-2);
288
+ }
289
+
290
+ /* Specification for estimator info (e.g. "i" and "?") */
291
+
292
+ /* Common style for "i" and "?" */
293
+
294
+ .sk-estimator-doc-link,
295
+ a:link.sk-estimator-doc-link,
296
+ a:visited.sk-estimator-doc-link {
297
+ float: right;
298
+ font-size: smaller;
299
+ line-height: 1em;
300
+ font-family: monospace;
301
+ background-color: var(--sklearn-color-background);
302
+ border-radius: 1em;
303
+ height: 1em;
304
+ width: 1em;
305
+ text-decoration: none !important;
306
+ margin-left: 1ex;
307
+ /* unfitted */
308
+ border: var(--sklearn-color-unfitted-level-1) 1pt solid;
309
+ color: var(--sklearn-color-unfitted-level-1);
310
+ }
311
+
312
+ .sk-estimator-doc-link.fitted,
313
+ a:link.sk-estimator-doc-link.fitted,
314
+ a:visited.sk-estimator-doc-link.fitted {
315
+ /* fitted */
316
+ border: var(--sklearn-color-fitted-level-1) 1pt solid;
317
+ color: var(--sklearn-color-fitted-level-1);
318
+ }
319
+
320
+ /* On hover */
321
+ div.sk-estimator:hover .sk-estimator-doc-link:hover,
322
+ .sk-estimator-doc-link:hover,
323
+ div.sk-label-container:hover .sk-estimator-doc-link:hover,
324
+ .sk-estimator-doc-link:hover {
325
+ /* unfitted */
326
+ background-color: var(--sklearn-color-unfitted-level-3);
327
+ color: var(--sklearn-color-background);
328
+ text-decoration: none;
329
+ }
330
+
331
+ div.sk-estimator.fitted:hover .sk-estimator-doc-link.fitted:hover,
332
+ .sk-estimator-doc-link.fitted:hover,
333
+ div.sk-label-container:hover .sk-estimator-doc-link.fitted:hover,
334
+ .sk-estimator-doc-link.fitted:hover {
335
+ /* fitted */
336
+ background-color: var(--sklearn-color-fitted-level-3);
337
+ color: var(--sklearn-color-background);
338
+ text-decoration: none;
339
+ }
340
+
341
+ /* Span, style for the box shown on hovering the info icon */
342
+ .sk-estimator-doc-link span {
343
+ display: none;
344
+ z-index: 9999;
345
+ position: relative;
346
+ font-weight: normal;
347
+ right: .2ex;
348
+ padding: .5ex;
349
+ margin: .5ex;
350
+ width: min-content;
351
+ min-width: 20ex;
352
+ max-width: 50ex;
353
+ color: var(--sklearn-color-text);
354
+ box-shadow: 2pt 2pt 4pt #999;
355
+ /* unfitted */
356
+ background: var(--sklearn-color-unfitted-level-0);
357
+ border: .5pt solid var(--sklearn-color-unfitted-level-3);
358
+ }
359
+
360
+ .sk-estimator-doc-link.fitted span {
361
+ /* fitted */
362
+ background: var(--sklearn-color-fitted-level-0);
363
+ border: var(--sklearn-color-fitted-level-3);
364
+ }
365
+
366
+ .sk-estimator-doc-link:hover span {
367
+ display: block;
368
+ }
369
+
370
+ /* "?"-specific style due to the `<a>` HTML tag */
371
+
372
+ #$id a.estimator_doc_link {
373
+ float: right;
374
+ font-size: 1rem;
375
+ line-height: 1em;
376
+ font-family: monospace;
377
+ background-color: var(--sklearn-color-background);
378
+ border-radius: 1rem;
379
+ height: 1rem;
380
+ width: 1rem;
381
+ text-decoration: none;
382
+ /* unfitted */
383
+ color: var(--sklearn-color-unfitted-level-1);
384
+ border: var(--sklearn-color-unfitted-level-1) 1pt solid;
385
+ }
386
+
387
+ #$id a.estimator_doc_link.fitted {
388
+ /* fitted */
389
+ border: var(--sklearn-color-fitted-level-1) 1pt solid;
390
+ color: var(--sklearn-color-fitted-level-1);
391
+ }
392
+
393
+ /* On hover */
394
+ #$id a.estimator_doc_link:hover {
395
+ /* unfitted */
396
+ background-color: var(--sklearn-color-unfitted-level-3);
397
+ color: var(--sklearn-color-background);
398
+ text-decoration: none;
399
+ }
400
+
401
+ #$id a.estimator_doc_link.fitted:hover {
402
+ /* fitted */
403
+ background-color: var(--sklearn-color-fitted-level-3);
404
+ }
env-llmeval/lib/python3.10/site-packages/sklearn/utils/_fast_dict.pxd ADDED
@@ -0,0 +1,18 @@
1
+ # Author: Gael Varoquaux
2
+ # License: BSD
3
+ """
4
+ Uses C++ map containers for fast dict-like behavior with keys being
5
+ integers and values being floats.
6
+ """
7
+
8
+ from libcpp.map cimport map as cpp_map
9
+
10
+ from ._typedefs cimport float64_t, intp_t
11
+
12
+
13
+ ###############################################################################
14
+ # An object to be used in Python
15
+
16
+ cdef class IntFloatDict:
17
+ cdef cpp_map[intp_t, float64_t] my_map
18
+ cdef _to_arrays(self, intp_t [:] keys, float64_t [:] values)
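Since this `.pxd` only declares the C-level interface, here is a hedged usage sketch of the Python-facing behavior (the constructor signature and dict-style access live in the accompanying `.pyx`, so treat the details as assumptions):

```python
# Hedged sketch: IntFloatDict maps intp keys to float64 values via a C++
# std::map; it is a private scikit-learn utility.
import numpy as np
from sklearn.utils._fast_dict import IntFloatDict

keys = np.array([1, 5, 7], dtype=np.intp)
values = np.array([0.1, 0.5, 0.7], dtype=np.float64)
d = IntFloatDict(keys, values)
d[9] = 0.9            # dict-style insertion
print(len(d), d[5])   # 4 0.5
```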
env-llmeval/lib/python3.10/site-packages/sklearn/utils/_heap.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (34.5 kB).
 
env-llmeval/lib/python3.10/site-packages/sklearn/utils/_heap.pxd ADDED
@@ -0,0 +1,14 @@
1
+ # Heap routines, used in various Cython implementations.
2
+
3
+ from cython cimport floating
4
+
5
+ from ._typedefs cimport intp_t
6
+
7
+
8
+ cdef int heap_push(
9
+ floating* values,
10
+ intp_t* indices,
11
+ intp_t size,
12
+ floating val,
13
+ intp_t val_idx,
14
+ ) noexcept nogil
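`heap_push` maintains a fixed-size max-heap of (value, index) pairs so the largest value sits at position 0; a push only succeeds when the new value beats the current worst, which is how scikit-learn keeps k-best candidate lists. The real routine is cimport-only and works in-place on C arrays, so the following is a pure-Python illustration of the same logic (a hedged sketch, not the Cython API):

```python
def heap_push(values, indices, size, val, val_idx):
    """Pure-Python sketch of the fixed-size max-heap push declared above."""
    if val >= values[0]:                     # not better than current worst
        return 0
    values[0], indices[0] = val, val_idx     # replace the root...
    i = 0
    while True:                              # ...and sift down to restore heap
        left, right, largest = 2 * i + 1, 2 * i + 2, i
        if left < size and values[left] > values[largest]:
            largest = left
        if right < size and values[right] > values[largest]:
            largest = right
        if largest == i:
            return 0
        values[i], values[largest] = values[largest], values[i]
        indices[i], indices[largest] = indices[largest], indices[i]
        i = largest
```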
env-llmeval/lib/python3.10/site-packages/sklearn/utils/_mask.py ADDED
@@ -0,0 +1,63 @@
1
+ from contextlib import suppress
2
+
3
+ import numpy as np
4
+ from scipy import sparse as sp
5
+
6
+ from . import is_scalar_nan
7
+ from .fixes import _object_dtype_isnan
8
+
9
+
10
+ def _get_dense_mask(X, value_to_mask):
11
+ with suppress(ImportError, AttributeError):
12
+ # We also suppress `AttributeError` because older versions of pandas do
13
+ # not have `NA`.
14
+ import pandas
15
+
16
+ if value_to_mask is pandas.NA:
17
+ return pandas.isna(X)
18
+
19
+ if is_scalar_nan(value_to_mask):
20
+ if X.dtype.kind == "f":
21
+ Xt = np.isnan(X)
22
+ elif X.dtype.kind in ("i", "u"):
23
+ # can't have NaNs in integer array.
24
+ Xt = np.zeros(X.shape, dtype=bool)
25
+ else:
26
+ # np.isnan does not work on object dtypes.
27
+ Xt = _object_dtype_isnan(X)
28
+ else:
29
+ Xt = X == value_to_mask
30
+
31
+ return Xt
32
+
33
+
34
+ def _get_mask(X, value_to_mask):
35
+ """Compute the boolean mask X == value_to_mask.
36
+
37
+ Parameters
38
+ ----------
39
+ X : {ndarray, sparse matrix} of shape (n_samples, n_features)
40
+ Input data, where ``n_samples`` is the number of samples and
41
+ ``n_features`` is the number of features.
42
+
43
+ value_to_mask : {int, float}
44
+ The value which is to be masked in X.
45
+
46
+ Returns
47
+ -------
48
+ X_mask : {ndarray, sparse matrix} of shape (n_samples, n_features)
49
+ Missing mask.
50
+ """
51
+ if not sp.issparse(X):
52
+ # For all cases apart of a sparse input where we need to reconstruct
53
+ # a sparse output
54
+ return _get_dense_mask(X, value_to_mask)
55
+
56
+ Xt = _get_dense_mask(X.data, value_to_mask)
57
+
58
+ sparse_constructor = sp.csr_matrix if X.format == "csr" else sp.csc_matrix
59
+ Xt_sparse = sparse_constructor(
60
+ (Xt, X.indices.copy(), X.indptr.copy()), shape=X.shape, dtype=bool
61
+ )
62
+
63
+ return Xt_sparse
env-llmeval/lib/python3.10/site-packages/sklearn/utils/_metadata_requests.py ADDED
@@ -0,0 +1,1563 @@
1
+ """
2
+ Metadata Routing Utility
3
+
4
+ In order to better understand the components implemented in this file, one
5
+ needs to understand their relationship to one another.
6
+
7
+ The only public API relevant to end users is the family of ``set_{method}_request`` methods,
8
+ e.g. ``estimator.set_fit_request(sample_weight=True)``. However, third-party
9
+ developers and users who implement custom meta-estimators need to deal with
10
+ the objects implemented in this file.
11
+
12
+ All estimators (should) implement a ``get_metadata_routing`` method, returning
13
+ the routing requests set for the estimator. This method is automatically
14
+ implemented via ``BaseEstimator`` for all simple estimators, but needs a custom
15
+ implementation for meta-estimators.
16
+
17
+ In non-routing consumers, i.e. the simplest case, e.g. ``SVM``,
18
+ ``get_metadata_routing`` returns a ``MetadataRequest`` object.
19
+
20
+ In routers, e.g. meta-estimators and the multi-metric scorer,
21
+ ``get_metadata_routing`` returns a ``MetadataRouter`` object.
22
+
23
+ For an object which is both a router and a consumer, e.g. a meta-estimator which
24
+ consumes ``sample_weight`` and routes ``sample_weight`` to its sub-estimators,
25
+ the routing information includes both the information about the object itself
26
+ (added via ``MetadataRouter.add_self_request``) and the routing information
27
+ for its sub-estimators.
28
+
29
+ A ``MetadataRequest`` instance includes one ``MethodMetadataRequest`` per
30
+ method in ``METHODS``, which includes ``fit``, ``score``, etc.
31
+
32
+ Request values are added to the routing mechanism by adding them to
33
+ ``MethodMetadataRequest`` instances, e.g.
34
+ ``metadatarequest.fit.add(param="sample_weight", alias="my_weights")``. This is
35
+ used in the ``set_{method}_request`` methods, which are automatically generated, so users
36
+ and developers almost never need to directly call methods on a
37
+ ``MethodMetadataRequest``.
38
+
39
+ The ``alias`` above in the ``add`` method has to be either a string (an alias),
40
+ or one of ``{True (requested), False (unrequested), None (error if passed)}``. There
41
+ are some other special values such as ``UNUSED`` and ``WARN`` which are used
42
+ for purposes such as warning when a metadata is removed in a child class, but are not
43
+ used by the end users.
44
+
45
+ ``MetadataRouter`` includes information about sub-objects' routing and how
46
+ methods are mapped together. For instance, the information about which methods
47
+ of a sub-estimator are called in which methods of the meta-estimator are all
48
+ stored here. Conceptually, this information looks like:
49
+
50
+ ```
51
+ {
52
+ "sub_estimator1": (
53
+ mapping=[(caller="fit", callee="transform"), ...],
54
+ router=MetadataRequest(...), # or another MetadataRouter
55
+ ),
56
+ ...
57
+ }
58
+ ```
59
+
60
+ To give the above representation some structure, we use the following objects:
61
+
62
+ - ``(caller, callee)`` is a namedtuple called ``MethodPair``
63
+
64
+ - The list of ``MethodPair`` stored in the ``mapping`` field is a
65
+ ``MethodMapping`` object
66
+
67
+ - ``(mapping=..., router=...)`` is a namedtuple called ``RouterMappingPair``
68
+
69
+ The ``set_{method}_request`` methods are dynamically generated for estimators
70
+ which inherit from the ``BaseEstimator``. This is done by attaching instances
71
+ of the ``RequestMethod`` descriptor to classes, which is done in the
72
+ ``_MetadataRequester`` class, and ``BaseEstimator`` inherits from this mixin.
73
+ This mixin also implements the ``get_metadata_routing``, which meta-estimators
74
+ need to override, but it works for simple consumers as is.
75
+ """
76
+
77
+ # Author: Adrin Jalali <[email protected]>
78
+ # License: BSD 3 clause
79
+
80
+ import inspect
81
+ from collections import namedtuple
82
+ from copy import deepcopy
83
+ from typing import TYPE_CHECKING, Optional, Union
84
+ from warnings import warn
85
+
86
+ from .. import get_config
87
+ from ..exceptions import UnsetMetadataPassedError
88
+ from ._bunch import Bunch
89
+
90
+ # Only the following methods are supported in the routing mechanism. Adding new
91
+ # methods at the moment involves monkeypatching this list.
92
+ # Note that if this list is changed or monkeypatched, the corresponding method
93
+ # needs to be added under a TYPE_CHECKING condition like the one done here in
94
+ # _MetadataRequester
95
+ SIMPLE_METHODS = [
96
+ "fit",
97
+ "partial_fit",
98
+ "predict",
99
+ "predict_proba",
100
+ "predict_log_proba",
101
+ "decision_function",
102
+ "score",
103
+ "split",
104
+ "transform",
105
+ "inverse_transform",
106
+ ]
107
+
108
+ # These methods are a composite of other methods and one cannot set their
109
+ # requests directly. Instead they should be set by setting the requests of the
110
+ # simple methods which make the composite ones.
111
+ COMPOSITE_METHODS = {
112
+ "fit_transform": ["fit", "transform"],
113
+ "fit_predict": ["fit", "predict"],
114
+ }
115
+
116
+ METHODS = SIMPLE_METHODS + list(COMPOSITE_METHODS.keys())
117
+
118
+
119
+ def _routing_enabled():
120
+ """Return whether metadata routing is enabled.
121
+
122
+ .. versionadded:: 1.3
123
+
124
+ Returns
125
+ -------
126
+ enabled : bool
127
+ Whether metadata routing is enabled. If the config is not set, it
128
+ defaults to False.
129
+ """
130
+ return get_config().get("enable_metadata_routing", False)
131
+
132
+
133
+ def _raise_for_params(params, owner, method):
134
+ """Raise an error if metadata routing is not enabled and params are passed.
135
+
136
+ .. versionadded:: 1.4
137
+
138
+ Parameters
139
+ ----------
140
+ params : dict
141
+ The metadata passed to a method.
142
+
143
+ owner : object
144
+ The object to which the method belongs.
145
+
146
+ method : str
147
+ The name of the method, e.g. "fit".
148
+
149
+ Raises
150
+ ------
151
+ ValueError
152
+ If metadata routing is not enabled and params are passed.
153
+ """
154
+ caller = (
155
+ f"{owner.__class__.__name__}.{method}" if method else owner.__class__.__name__
156
+ )
157
+ if not _routing_enabled() and params:
158
+ raise ValueError(
159
+ f"Passing extra keyword arguments to {caller} is only supported if"
160
+ " enable_metadata_routing=True, which you can set using"
161
+ " `sklearn.set_config`. See the User Guide"
162
+ " <https://scikit-learn.org/stable/metadata_routing.html> for more"
163
+ f" details. Extra parameters passed are: {set(params)}"
164
+ )
165
+
166
+
167
+ def _raise_for_unsupported_routing(obj, method, **kwargs):
168
+ """Raise when metadata routing is enabled and metadata is passed.
169
+
170
+ This is used in meta-estimators which have not implemented metadata routing
171
+ to prevent silent bugs. There is no need to use this function if the
172
+ meta-estimator is not accepting any metadata, especially in `fit`, since
173
+ if a meta-estimator accepts any metadata, they would do that in `fit` as
174
+ well.
175
+
176
+ Parameters
177
+ ----------
178
+ obj : estimator
179
+ The estimator for which we're raising the error.
180
+
181
+ method : str
182
+ The method where the error is raised.
183
+
184
+ **kwargs : dict
185
+ The metadata passed to the method.
186
+ """
187
+ kwargs = {key: value for key, value in kwargs.items() if value is not None}
188
+ if _routing_enabled() and kwargs:
189
+ cls_name = obj.__class__.__name__
190
+ raise NotImplementedError(
191
+ f"{cls_name}.{method} cannot accept given metadata ({set(kwargs.keys())})"
192
+ f" since metadata routing is not yet implemented for {cls_name}."
193
+ )
194
+
195
+
196
+ class _RoutingNotSupportedMixin:
197
+ """A mixin to be used to remove the default `get_metadata_routing`.
198
+
199
+ This is used in meta-estimators where metadata routing is not yet
200
+ implemented.
201
+
202
+ This also makes it clear in our rendered documentation that this method
203
+ cannot be used.
204
+ """
205
+
206
+ def get_metadata_routing(self):
207
+ """Raise `NotImplementedError`.
208
+
209
+ This estimator does not support metadata routing yet."""
210
+ raise NotImplementedError(
211
+ f"{self.__class__.__name__} has not implemented metadata routing yet."
212
+ )
213
+
214
+
215
+ # Request values
216
+ # ==============
217
+ # Each request value needs to be one of the following values, or an alias.
218
+
219
+ # this is used in `__metadata_request__*` attributes to indicate that a
220
+ # metadata is not present even though it may be present in the
221
+ # corresponding method's signature.
222
+ UNUSED = "$UNUSED$"
223
+
224
+ # this is used whenever a default value is changed, and therefore the user
225
+ # should explicitly set the value, otherwise a warning is shown. An example
226
+ # is when a meta-estimator is only a router, but then becomes also a
227
+ # consumer in a new release.
228
+ WARN = "$WARN$"
229
+
230
+ # this is the default used in `set_{method}_request` methods to indicate no
231
+ # change requested by the user.
232
+ UNCHANGED = "$UNCHANGED$"
233
+
234
+ VALID_REQUEST_VALUES = [False, True, None, UNUSED, WARN]
235
+
236
+
237
+ def request_is_alias(item):
238
+ """Check if an item is a valid alias.
239
+
240
+ Values in ``VALID_REQUEST_VALUES`` are not considered aliases in this
241
+ context. Only a string which is a valid identifier is.
242
+
243
+ Parameters
244
+ ----------
245
+ item : object
246
+ The given item to be checked if it can be an alias.
247
+
248
+ Returns
249
+ -------
250
+ result : bool
251
+ Whether the given item is a valid alias.
252
+ """
253
+ if item in VALID_REQUEST_VALUES:
254
+ return False
255
+
256
+ # item is only an alias if it's a valid identifier
257
+ return isinstance(item, str) and item.isidentifier()
258
+
259
+
260
+ def request_is_valid(item):
261
+ """Check if an item is a valid request value (and not an alias).
262
+
263
+ Parameters
264
+ ----------
265
+ item : object
266
+ The given item to be checked.
267
+
268
+ Returns
269
+ -------
270
+ result : bool
271
+ Whether the given item is valid.
272
+ """
273
+ return item in VALID_REQUEST_VALUES
274
+
275
+
276
+ # Metadata Request for Simple Consumers
277
+ # =====================================
278
+ # This section includes MethodMetadataRequest and MetadataRequest which are
279
+ # used in simple consumers.
280
+
281
+
282
+ class MethodMetadataRequest:
283
+ """A prescription of how metadata is to be passed to a single method.
284
+
285
+ Refer to :class:`MetadataRequest` for how this class is used.
286
+
287
+ .. versionadded:: 1.3
288
+
289
+ Parameters
290
+ ----------
291
+ owner : str
292
+ A display name for the object owning these requests.
293
+
294
+ method : str
295
+ The name of the method to which these requests belong.
296
+
297
+ requests : dict of {str: bool, None or str}, default=None
298
+ The initial requests for this method.
299
+ """
300
+
301
+ def __init__(self, owner, method, requests=None):
302
+ self._requests = requests or dict()
303
+ self.owner = owner
304
+ self.method = method
305
+
306
+ @property
307
+ def requests(self):
308
+ """Dictionary of the form: ``{key: alias}``."""
309
+ return self._requests
310
+
311
+ def add_request(
312
+ self,
313
+ *,
314
+ param,
315
+ alias,
316
+ ):
317
+ """Add request info for a metadata.
318
+
319
+ Parameters
320
+ ----------
321
+ param : str
322
+ The property for which a request is set.
323
+
324
+ alias : str, or {True, False, None}
325
+ Specifies which metadata should be routed to `param`:
326
+
327
+ - str: the name (or alias) of metadata given to a meta-estimator that
328
+ should be routed to this parameter.
329
+
330
+ - True: requested
331
+
332
+ - False: not requested
333
+
334
+ - None: error if passed
335
+ """
336
+ if not request_is_alias(alias) and not request_is_valid(alias):
337
+ raise ValueError(
338
+ f"The alias you're setting for `{param}` should be either a "
339
+ "valid identifier or one of {None, True, False}, but given "
340
+ f"value is: `{alias}`"
341
+ )
342
+
343
+ if alias == param:
344
+ alias = True
345
+
346
+ if alias == UNUSED:
347
+ if param in self._requests:
348
+ del self._requests[param]
349
+ else:
350
+ raise ValueError(
351
+ f"Trying to remove parameter {param} with UNUSED which doesn't"
352
+ " exist."
353
+ )
354
+ else:
355
+ self._requests[param] = alias
356
+
357
+ return self
358
+
359
+ def _get_param_names(self, return_alias):
360
+ """Get names of all metadata that can be consumed or routed by this method.
361
+
362
+ This method returns the names of all metadata, even the ``False``
363
+ ones.
364
+
365
+ Parameters
366
+ ----------
367
+ return_alias : bool
368
+ Controls whether original or aliased names should be returned. If
369
+ ``False``, aliases are ignored and original names are returned.
370
+
371
+ Returns
372
+ -------
373
+ names : set of str
374
+ A set of strings with the names of all parameters.
375
+ """
376
+ return set(
377
+ alias if return_alias and not request_is_valid(alias) else prop
378
+ for prop, alias in self._requests.items()
379
+ if not request_is_valid(alias) or alias is not False
380
+ )
381
+
382
+ def _check_warnings(self, *, params):
383
+ """Check whether metadata is passed which is marked as WARN.
384
+
385
+ If any metadata is passed which is marked as WARN, a warning is raised.
386
+
387
+ Parameters
388
+ ----------
389
+ params : dict
390
+ The metadata passed to a method.
391
+ """
392
+ params = {} if params is None else params
393
+ warn_params = {
394
+ prop
395
+ for prop, alias in self._requests.items()
396
+ if alias == WARN and prop in params
397
+ }
398
+ for param in warn_params:
399
+ warn(
400
+ f"Support for {param} has recently been added to this class. "
401
+ "To maintain backward compatibility, it is ignored now. "
402
+ "You can set the request value to False to silence this "
403
+ "warning, or to True to consume and use the metadata."
404
+ )
405
+
406
+ def _route_params(self, params):
407
+ """Prepare the given parameters to be passed to the method.
408
+
409
+ The output of this method can be used directly as the input to the
410
+ corresponding method as extra props.
411
+
412
+ Parameters
413
+ ----------
414
+ params : dict
415
+ A dictionary of provided metadata.
416
+
417
+ Returns
418
+ -------
419
+ params : Bunch
420
+ A :class:`~sklearn.utils.Bunch` of {prop: value} which can be given to the
421
+ corresponding method.
422
+ """
423
+ self._check_warnings(params=params)
424
+ unrequested = dict()
425
+ args = {arg: value for arg, value in params.items() if value is not None}
426
+ res = Bunch()
427
+ for prop, alias in self._requests.items():
428
+ if alias is False or alias == WARN:
429
+ continue
430
+ elif alias is True and prop in args:
431
+ res[prop] = args[prop]
432
+ elif alias is None and prop in args:
433
+ unrequested[prop] = args[prop]
434
+ elif alias in args:
435
+ res[prop] = args[alias]
436
+ if unrequested:
437
+ raise UnsetMetadataPassedError(
438
+ message=(
439
+ f"[{', '.join([key for key in unrequested])}] are passed but are"
440
+ " not explicitly set as requested or not for"
441
+ f" {self.owner}.{self.method}"
442
+ ),
443
+ unrequested_params=unrequested,
444
+ routed_params=res,
445
+ )
446
+ return res
447
+
448
+ def _consumes(self, params):
449
+ """Check whether the given parameters are consumed by this method.
450
+
451
+ Parameters
452
+ ----------
453
+ params : iterable of str
454
+ An iterable of parameters to check.
455
+
456
+ Returns
457
+ -------
458
+ consumed : set of str
459
+ A set of parameters which are consumed by this method.
460
+ """
461
+ params = set(params)
462
+ res = set()
463
+ for prop, alias in self._requests.items():
464
+ if alias is True and prop in params:
465
+ res.add(prop)
466
+ elif isinstance(alias, str) and alias in params:
467
+ res.add(alias)
468
+ return res
469
+
470
+ def _serialize(self):
471
+ """Serialize the object.
472
+
473
+ Returns
474
+ -------
475
+ obj : dict
476
+ A serialized version of the instance in the form of a dictionary.
477
+ """
478
+ return self._requests
479
+
480
+ def __repr__(self):
481
+ return str(self._serialize())
482
+
483
+ def __str__(self):
484
+ return str(repr(self))
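A hedged illustration of the aliasing rules implemented above, poking the private API directly (end users would go through `set_{method}_request` instead):

```python
req = MethodMetadataRequest(owner="MyEstimator", method="fit")
req.add_request(param="sample_weight", alias="my_weights")

# The alias is translated back to the parameter name the consumer expects:
print(req._route_params(params={"my_weights": [1.0, 2.0]}))
# {'sample_weight': [1.0, 2.0]}
```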
485
+
486
+
487
+ class MetadataRequest:
488
+ """Contains the metadata request info of a consumer.
489
+
490
+ Instances of `MethodMetadataRequest` are used in this class for each
491
+ available method under `metadatarequest.{method}`.
492
+
493
+ Consumer-only classes such as simple estimators return a serialized
494
+ version of this class as the output of `get_metadata_routing()`.
495
+
496
+ .. versionadded:: 1.3
497
+
498
+ Parameters
499
+ ----------
500
+ owner : str
501
+ The name of the object to which these requests belong.
502
+ """
503
+
504
+ # this is here for us to use this attribute's value instead of doing
505
+ # `isinstance` in our checks, so that we avoid issues when people vendor
506
+ # this file instead of using it directly from scikit-learn.
507
+ _type = "metadata_request"
508
+
509
+ def __init__(self, owner):
510
+ self.owner = owner
511
+ for method in SIMPLE_METHODS:
512
+ setattr(
513
+ self,
514
+ method,
515
+ MethodMetadataRequest(owner=owner, method=method),
516
+ )
517
+
518
+ def consumes(self, method, params):
519
+ """Check whether the given parameters are consumed by the given method.
520
+
521
+ .. versionadded:: 1.4
522
+
523
+ Parameters
524
+ ----------
525
+ method : str
526
+ The name of the method to check.
527
+
528
+ params : iterable of str
529
+ An iterable of parameters to check.
530
+
531
+ Returns
532
+ -------
533
+ consumed : set of str
534
+ A set of parameters which are consumed by the given method.
535
+ """
536
+ return getattr(self, method)._consumes(params=params)
537
+
538
+ def __getattr__(self, name):
539
+ # Called when the default attribute access fails with an AttributeError
540
+ # (either __getattribute__() raises an AttributeError because name is
541
+ # not an instance attribute or an attribute in the class tree for self;
542
+ # or __get__() of a name property raises AttributeError). This method
543
+ # should either return the (computed) attribute value or raise an
544
+ # AttributeError exception.
545
+ # https://docs.python.org/3/reference/datamodel.html#object.__getattr__
546
+ if name not in COMPOSITE_METHODS:
547
+ raise AttributeError(
548
+ f"'{self.__class__.__name__}' object has no attribute '{name}'"
549
+ )
550
+
551
+ requests = {}
552
+ for method in COMPOSITE_METHODS[name]:
553
+ mmr = getattr(self, method)
554
+ existing = set(requests.keys())
555
+ upcoming = set(mmr.requests.keys())
556
+ common = existing & upcoming
557
+ conflicts = [key for key in common if requests[key] != mmr._requests[key]]
558
+ if conflicts:
559
+ raise ValueError(
560
+ f"Conflicting metadata requests for {', '.join(conflicts)} while"
561
+ f" composing the requests for {name}. Metadata with the same name"
562
+ f" for methods {', '.join(COMPOSITE_METHODS[name])} should have the"
563
+ " same request value."
564
+ )
565
+ requests.update(mmr._requests)
566
+ return MethodMetadataRequest(owner=self.owner, method=name, requests=requests)
567
+
568
+ def _get_param_names(self, method, return_alias, ignore_self_request=None):
569
+ """Get names of all metadata that can be consumed or routed by specified \
570
+ method.
571
+
572
+ This method returns the names of all metadata, even the ``False``
573
+ ones.
574
+
575
+ Parameters
576
+ ----------
577
+ method : str
578
+ The name of the method for which metadata names are requested.
579
+
580
+ return_alias : bool
581
+ Controls whether original or aliased names should be returned. If
582
+ ``False``, aliases are ignored and original names are returned.
583
+
584
+ ignore_self_request : bool
585
+ Ignored. Present for API compatibility.
586
+
587
+ Returns
588
+ -------
589
+ names : set of str
590
+ A set of strings with the names of all parameters.
591
+ """
592
+ return getattr(self, method)._get_param_names(return_alias=return_alias)
593
+
594
+ def _route_params(self, *, method, params):
595
+ """Prepare the given parameters to be passed to the method.
596
+
597
+ The output of this method can be used directly as the input to the
598
+ corresponding method as extra keyword arguments to pass metadata.
599
+
600
+ Parameters
601
+ ----------
602
+ method : str
603
+ The name of the method for which the parameters are requested and
604
+ routed.
605
+
606
+ params : dict
607
+ A dictionary of provided metadata.
608
+
609
+ Returns
610
+ -------
611
+ params : Bunch
612
+ A :class:`~sklearn.utils.Bunch` of {prop: value} which can be given to the
613
+ corresponding method.
614
+ """
615
+ return getattr(self, method)._route_params(params=params)
616
+
617
+ def _check_warnings(self, *, method, params):
618
+ """Check whether metadata is passed which is marked as WARN.
619
+
620
+ If any metadata is passed which is marked as WARN, a warning is raised.
621
+
622
+ Parameters
623
+ ----------
624
+ method : str
625
+ The name of the method for which the warnings should be checked.
626
+
627
+ params : dict
628
+ The metadata passed to a method.
629
+ """
630
+ getattr(self, method)._check_warnings(params=params)
631
+
632
+ def _serialize(self):
633
+ """Serialize the object.
634
+
635
+ Returns
636
+ -------
637
+ obj : dict
638
+ A serialized version of the instance in the form of a dictionary.
639
+ """
640
+ output = dict()
641
+ for method in SIMPLE_METHODS:
642
+ mmr = getattr(self, method)
643
+ if len(mmr.requests):
644
+ output[method] = mmr._serialize()
645
+ return output
646
+
647
+ def __repr__(self):
648
+ return str(self._serialize())
649
+
650
+ def __str__(self):
651
+ return str(repr(self))
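A hedged sketch of the composite-method behavior implemented in `__getattr__` above:

```python
mr = MetadataRequest(owner="MyTransformer")
mr.fit.add_request(param="sample_weight", alias=True)
mr.transform.add_request(param="sample_weight", alias=True)

# fit_transform is assembled on the fly from fit and transform; conflicting
# request values for the same key would raise a ValueError instead.
print(mr.fit_transform.requests)   # {'sample_weight': True}
```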
652
+
653
+
654
+ # Metadata Request for Routers
655
+ # ============================
656
+ # This section includes all objects required for MetadataRouter which is used
657
+ # in routers, returned by their ``get_metadata_routing``.
658
+
659
+ # This namedtuple is used to store a (mapping, routing) pair. Mapping is a
660
+ # MethodMapping object, and routing is the output of `get_metadata_routing`.
661
+ # MetadataRouter stores a collection of these namedtuples.
662
+ RouterMappingPair = namedtuple("RouterMappingPair", ["mapping", "router"])
663
+
664
+ # A namedtuple storing a single method route. A collection of these namedtuples
665
+ # is stored in a MetadataRouter.
666
+ MethodPair = namedtuple("MethodPair", ["callee", "caller"])
667
+
668
+
669
+ class MethodMapping:
670
+ """Stores the mapping between callee and caller methods for a router.
671
+
672
+ This class is primarily used in a ``get_metadata_routing()`` of a router
673
+ object when defining the mapping between a sub-object (a sub-estimator or a
674
+ scorer) to the router's methods. It stores a collection of ``Route``
675
+ namedtuples.
676
+
677
+ Iterating through an instance of this class will yield named
678
+ ``MethodPair(callee, caller)`` tuples.
679
+
680
+ .. versionadded:: 1.3
681
+ """
682
+
683
+ def __init__(self):
684
+ self._routes = []
685
+
686
+ def __iter__(self):
687
+ return iter(self._routes)
688
+
689
+ def add(self, *, callee, caller):
690
+ """Add a method mapping.
691
+
692
+ Parameters
693
+ ----------
694
+ callee : str
695
+ Child object's method name. This method is called in ``caller``.
696
+
697
+ caller : str
698
+ Parent estimator's method name in which the ``callee`` is called.
699
+
700
+ Returns
701
+ -------
702
+ self : MethodMapping
703
+ Returns self.
704
+ """
705
+ if callee not in METHODS:
706
+ raise ValueError(
707
+ f"Given callee:{callee} is not a valid method. Valid methods are:"
708
+ f" {METHODS}"
709
+ )
710
+ if caller not in METHODS:
711
+ raise ValueError(
712
+ f"Given caller:{caller} is not a valid method. Valid methods are:"
713
+ f" {METHODS}"
714
+ )
715
+ self._routes.append(MethodPair(callee=callee, caller=caller))
716
+ return self
717
+
718
+ def _serialize(self):
719
+ """Serialize the object.
720
+
721
+ Returns
722
+ -------
723
+ obj : list
724
+ A serialized version of the instance in the form of a list.
725
+ """
726
+ result = list()
727
+ for route in self._routes:
728
+ result.append({"callee": route.callee, "caller": route.caller})
729
+ return result
730
+
731
+ @classmethod
732
+ def from_str(cls, route):
733
+ """Construct an instance from a string.
734
+
735
+ Parameters
736
+ ----------
737
+ route : str
738
+ A string representing the mapping; it can be:
739
+
740
+ - `"one-to-one"`: a one to one mapping for all methods.
741
+ - `"method"`: the name of a single method, such as ``fit``,
742
+ ``transform``, ``score``, etc.
743
+
744
+ Returns
745
+ -------
746
+ obj : MethodMapping
747
+ A :class:`~sklearn.utils.metadata_routing.MethodMapping` instance
748
+ constructed from the given string.
749
+ """
750
+ routing = cls()
751
+ if route == "one-to-one":
752
+ for method in METHODS:
753
+ routing.add(callee=method, caller=method)
754
+ elif route in METHODS:
755
+ routing.add(callee=route, caller=route)
756
+ else:
757
+ raise ValueError("route should be 'one-to-one' or a single method!")
758
+ return routing
759
+
760
+ def __repr__(self):
761
+ return str(self._serialize())
762
+
763
+ def __str__(self):
764
+ return str(repr(self))
765
+
766
+
767
+ class MetadataRouter:
768
+ """Stores and handles metadata routing for a router object.
769
+
770
+ This class is used by router objects to store and handle metadata routing.
771
+ Routing information is stored as a dictionary of the form ``{"object_name":
772
+ RouterMappingPair(method_mapping, routing_info)}``, where ``method_mapping``
773
+ is an instance of :class:`~sklearn.utils.metadata_routing.MethodMapping` and
774
+ ``routing_info`` is either a
775
+ :class:`~sklearn.utils.metadata_routing.MetadataRequest` or a
776
+ :class:`~sklearn.utils.metadata_routing.MetadataRouter` instance.
777
+
778
+ .. versionadded:: 1.3
779
+
780
+ Parameters
781
+ ----------
782
+ owner : str
783
+ The name of the object to which these requests belong.
784
+ """
785
+
786
+ # this is here for us to use this attribute's value instead of doing
787
+ # `isinstance` in our checks, so that we avoid issues when people vendor
788
+ # this file instead of using it directly from scikit-learn.
789
+ _type = "metadata_router"
790
+
791
+ def __init__(self, owner):
792
+ self._route_mappings = dict()
793
+ # `_self_request` is used if the router is also a consumer.
794
+ # _self_request, (added using `add_self_request()`) is treated
795
+ # differently from the other objects which are stored in
796
+ # _route_mappings.
797
+ self._self_request = None
798
+ self.owner = owner
799
+
800
+ def add_self_request(self, obj):
801
+ """Add `self` (as a consumer) to the routing.
802
+
803
+ This method is used if the router is also a consumer, and hence the
804
+ router itself needs to be included in the routing. The passed object
805
+ can be an estimator or a
806
+ :class:`~sklearn.utils.metadata_routing.MetadataRequest`.
807
+
808
+ A router should add itself using this method instead of `add` since it
809
+ should be treated differently than the other objects to which metadata
810
+ is routed by the router.
811
+
812
+ Parameters
813
+ ----------
814
+ obj : object
815
+ This is typically the router instance, i.e. `self` in a
816
+ ``get_metadata_routing()`` implementation. It can also be a
817
+ ``MetadataRequest`` instance.
818
+
819
+ Returns
820
+ -------
821
+ self : MetadataRouter
822
+ Returns `self`.
823
+ """
824
+ if getattr(obj, "_type", None) == "metadata_request":
825
+ self._self_request = deepcopy(obj)
826
+ elif hasattr(obj, "_get_metadata_request"):
827
+ self._self_request = deepcopy(obj._get_metadata_request())
828
+ else:
829
+ raise ValueError(
830
+ "Given `obj` is neither a `MetadataRequest` nor does it implement the"
831
+ " required API. Inheriting from `BaseEstimator` implements the required"
832
+ " API."
833
+ )
834
+ return self
835
+
836
+ def add(self, *, method_mapping, **objs):
837
+ """Add named objects with their corresponding method mapping.
838
+
839
+ Parameters
840
+ ----------
841
+ method_mapping : MethodMapping or str
842
+ The mapping between the child and the parent's methods. If str, the
843
+ output of :func:`~sklearn.utils.metadata_routing.MethodMapping.from_str`
844
+ is used.
845
+
846
+ **objs : dict
847
+ A dictionary of objects from which metadata is extracted by calling
848
+ :func:`~sklearn.utils.metadata_routing.get_routing_for_object` on them.
849
+
850
+ Returns
851
+ -------
852
+ self : MetadataRouter
853
+ Returns `self`.
854
+ """
855
+ if isinstance(method_mapping, str):
856
+ method_mapping = MethodMapping.from_str(method_mapping)
857
+ else:
858
+ method_mapping = deepcopy(method_mapping)
859
+
860
+ for name, obj in objs.items():
861
+ self._route_mappings[name] = RouterMappingPair(
862
+ mapping=method_mapping, router=get_routing_for_object(obj)
863
+ )
864
+ return self
865
+
866
+ def consumes(self, method, params):
867
+ """Check whether the given parameters are consumed by the given method.
868
+
869
+ .. versionadded:: 1.4
870
+
871
+ Parameters
872
+ ----------
873
+ method : str
874
+ The name of the method to check.
875
+
876
+ params : iterable of str
877
+ An iterable of parameters to check.
878
+
879
+ Returns
880
+ -------
881
+ consumed : set of str
882
+ A set of parameters which are consumed by the given method.
883
+ """
884
+ res = set()
885
+ if self._self_request:
886
+ res = res | self._self_request.consumes(method=method, params=params)
887
+
888
+ for _, route_mapping in self._route_mappings.items():
889
+ for callee, caller in route_mapping.mapping:
890
+ if caller == method:
891
+ res = res | route_mapping.router.consumes(
892
+ method=callee, params=params
893
+ )
894
+
895
+ return res
896
+
897
+ def _get_param_names(self, *, method, return_alias, ignore_self_request):
898
+ """Get names of all metadata that can be consumed or routed by specified \
899
+ method.
900
+
901
+ This method returns the names of all metadata, even the ``False``
902
+ ones.
903
+
904
+ Parameters
905
+ ----------
906
+ method : str
907
+ The name of the method for which metadata names are requested.
908
+
909
+ return_alias : bool
910
+ Controls whether original or aliased names should be returned,
911
+ which only applies to the stored `self`. If no `self` routing
912
+ object is stored, this parameter has no effect.
913
+
914
+ ignore_self_request : bool
915
+ If `self._self_request` should be ignored. This is used in `_route_params`.
916
+ If ``True``, ``return_alias`` has no effect.
917
+
918
+ Returns
919
+ -------
920
+ names : set of str
921
+ A set of strings with the names of all parameters.
922
+ """
923
+ res = set()
924
+ if self._self_request and not ignore_self_request:
925
+ res = res.union(
926
+ self._self_request._get_param_names(
927
+ method=method, return_alias=return_alias
928
+ )
929
+ )
930
+
931
+ for name, route_mapping in self._route_mappings.items():
932
+ for callee, caller in route_mapping.mapping:
933
+ if caller == method:
934
+ res = res.union(
935
+ route_mapping.router._get_param_names(
936
+ method=callee, return_alias=True, ignore_self_request=False
937
+ )
938
+ )
939
+ return res
940
+
941
+ def _route_params(self, *, params, method):
942
+ """Prepare the given parameters to be passed to the method.
943
+
944
+ This is used when a router is used as a child object of another router.
945
+ The parent router then passes all parameters understood by the child
946
+ object to it and delegates their validation to the child.
947
+
948
+ The output of this method can be used directly as the input to the
949
+ corresponding method as extra props.
950
+
951
+ Parameters
952
+ ----------
953
+ method : str
954
+ The name of the method for which the parameters are requested and
955
+ routed.
956
+
957
+ params : dict
958
+ A dictionary of provided metadata.
959
+
960
+ Returns
961
+ -------
962
+ params : Bunch
963
+ A :class:`~sklearn.utils.Bunch` of {prop: value} which can be given to the
964
+ corresponding method.
965
+ """
966
+ res = Bunch()
967
+ if self._self_request:
968
+ res.update(self._self_request._route_params(params=params, method=method))
969
+
970
+ param_names = self._get_param_names(
971
+ method=method, return_alias=True, ignore_self_request=True
972
+ )
973
+ child_params = {
974
+ key: value for key, value in params.items() if key in param_names
975
+ }
976
+ for key in set(res.keys()).intersection(child_params.keys()):
977
+ # conflicts are okay if the passed objects are the same, but it's
978
+ # an issue if they're different objects.
979
+ if child_params[key] is not res[key]:
980
+ raise ValueError(
981
+ f"In {self.owner}, there is a conflict on {key} between what is"
982
+ " requested for this estimator and what is requested by its"
983
+ " children. You can resolve this conflict by using an alias for"
984
+ " the child estimator(s) requested metadata."
985
+ )
986
+
987
+ res.update(child_params)
988
+ return res
989
+
990
+ def route_params(self, *, caller, params):
991
+ """Return the input parameters requested by child objects.
992
+
993
+ The output of this method is a bunch, which includes the inputs for all
994
+ methods of each child object that are used in the router's `caller`
995
+ method.
996
+
997
+ If the router is also a consumer, it also checks for warnings of
998
+ `self`'s/consumer's requested metadata.
999
+
1000
+ Parameters
1001
+ ----------
1002
+ caller : str
1003
+ The name of the method for which the parameters are requested and
1004
+ routed. If called inside the :term:`fit` method of a router, it
1005
+ would be `"fit"`.
1006
+
1007
+ params : dict
1008
+ A dictionary of provided metadata.
1009
+
1010
+ Returns
1011
+ -------
1012
+ params : Bunch
1013
+ A :class:`~sklearn.utils.Bunch` of the form
1014
+ ``{"object_name": {"method_name": {prop: value}}}`` which can be
1015
+ used to pass the required metadata to corresponding methods or
1016
+ corresponding child objects.
1017
+ """
1018
+ if self._self_request:
1019
+ self._self_request._check_warnings(params=params, method=caller)
1020
+
1021
+ res = Bunch()
1022
+ for name, route_mapping in self._route_mappings.items():
1023
+ router, mapping = route_mapping.router, route_mapping.mapping
1024
+
1025
+ res[name] = Bunch()
1026
+ for _callee, _caller in mapping:
1027
+ if _caller == caller:
1028
+ res[name][_callee] = router._route_params(
1029
+ params=params, method=_callee
1030
+ )
1031
+ return res
1032
+
1033
+ def validate_metadata(self, *, method, params):
1034
+ """Validate given metadata for a method.
1035
+
1036
+ This raises a ``TypeError`` if some of the passed metadata are not
1037
+ understood by child objects.
1038
+
1039
+ Parameters
1040
+ ----------
1041
+ method : str
1042
+ The name of the method for which the parameters are requested and
1043
+ routed. If called inside the :term:`fit` method of a router, it
1044
+ would be `"fit"`.
1045
+
1046
+ params : dict
1047
+ A dictionary of provided metadata.
1048
+ """
1049
+ param_names = self._get_param_names(
1050
+ method=method, return_alias=False, ignore_self_request=False
1051
+ )
1052
+ if self._self_request:
1053
+ self_params = self._self_request._get_param_names(
1054
+ method=method, return_alias=False
1055
+ )
1056
+ else:
1057
+ self_params = set()
1058
+ extra_keys = set(params.keys()) - param_names - self_params
1059
+ if extra_keys:
1060
+ raise TypeError(
1061
+ f"{self.owner}.{method} got unexpected argument(s) {extra_keys}, which"
1062
+ " are not requested metadata in any object."
1063
+ )
1064
+
1065
+ def _serialize(self):
1066
+ """Serialize the object.
1067
+
1068
+ Returns
1069
+ -------
1070
+ obj : dict
1071
+ A serialized version of the instance in the form of a dictionary.
1072
+ """
1073
+ res = dict()
1074
+ if self._self_request:
1075
+ res["$self_request"] = self._self_request._serialize()
1076
+ for name, route_mapping in self._route_mappings.items():
1077
+ res[name] = dict()
1078
+ res[name]["mapping"] = route_mapping.mapping._serialize()
1079
+ res[name]["router"] = route_mapping.router._serialize()
1080
+
1081
+ return res
1082
+
1083
+ def __iter__(self):
1084
+ if self._self_request:
1085
+ yield (
1086
+ "$self_request",
1087
+ RouterMappingPair(
1088
+ mapping=MethodMapping.from_str("one-to-one"),
1089
+ router=self._self_request,
1090
+ ),
1091
+ )
1092
+ for name, route_mapping in self._route_mappings.items():
1093
+ yield (name, route_mapping)
1094
+
1095
+ def __repr__(self):
1096
+ return str(self._serialize())
1097
+
1098
+ def __str__(self):
1099
+ return str(repr(self))
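Putting the pieces together, a hedged sketch of a typical `get_metadata_routing` for a meta-estimator that both consumes metadata and routes it to a sub-estimator (`self.estimator` is a hypothetical attribute, not part of the code above):

```python
def get_metadata_routing(self):
    # self.estimator is an illustrative sub-estimator attribute.
    return (
        MetadataRouter(owner=self.__class__.__name__)
        .add_self_request(self)   # this object consumes metadata too
        .add(
            estimator=self.estimator,
            method_mapping=MethodMapping().add(caller="fit", callee="fit"),
        )
    )
```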
1100
+
1101
+
1102
+ def get_routing_for_object(obj=None):
1103
+ """Get a ``Metadata{Router, Request}`` instance from the given object.
1104
+
1105
+ This function returns a
1106
+ :class:`~sklearn.utils.metadata_routing.MetadataRouter` or a
1107
+ :class:`~sklearn.utils.metadata_routing.MetadataRequest` from the given input.
1108
+
1109
+ This function always returns a copy or an instance constructed from the
1110
+ input, such that changing the output of this function will not change the
1111
+ original object.
1112
+
1113
+ .. versionadded:: 1.3
1114
+
1115
+ Parameters
1116
+ ----------
1117
+ obj : object
1118
+ - If the object is already a
1119
+ :class:`~sklearn.utils.metadata_routing.MetadataRequest` or a
1120
+ :class:`~sklearn.utils.metadata_routing.MetadataRouter`, return a copy
1121
+ of that.
1122
+ - If the object provides a `get_metadata_routing` method, return a copy
1123
+ of the output of that method.
1124
+ - Returns an empty :class:`~sklearn.utils.metadata_routing.MetadataRequest`
1125
+ otherwise.
1126
+
1127
+ Returns
1128
+ -------
1129
+ obj : MetadataRequest or MetadataRouter
1130
+ A ``MetadataRequest`` or a ``MetadataRouter`` taken or created from
1131
+ the given object.
1132
+ """
1133
+ # doing this instead of a try/except since an AttributeError could be raised
1134
+ # for other reasons.
1135
+ if hasattr(obj, "get_metadata_routing"):
1136
+ return deepcopy(obj.get_metadata_routing())
1137
+
1138
+ elif getattr(obj, "_type", None) in ["metadata_request", "metadata_router"]:
1139
+ return deepcopy(obj)
1140
+
1141
+ return MetadataRequest(owner=None)
1142
+
1143
+
1144
+ # Request method
1145
+ # ==============
1146
+ # This section includes what's needed for the request method descriptor and
1147
+ # their dynamic generation in a meta class.
1148
+
1149
+ # These strings are used to dynamically generate the docstrings for
1150
+ # set_{method}_request methods.
1151
+ REQUESTER_DOC = """ Request metadata passed to the ``{method}`` method.
1152
+
1153
+ Note that this method is only relevant if
1154
+ ``enable_metadata_routing=True`` (see :func:`sklearn.set_config`).
1155
+ Please see :ref:`User Guide <metadata_routing>` on how the routing
1156
+ mechanism works.
1157
+
1158
+ The options for each parameter are:
1159
+
1160
+ - ``True``: metadata is requested, and \
1161
+ passed to ``{method}`` if provided. The request is ignored if \
1162
+ metadata is not provided.
1163
+
1164
+ - ``False``: metadata is not requested and the meta-estimator \
1165
+ will not pass it to ``{method}``.
1166
+
1167
+ - ``None``: metadata is not requested, and the meta-estimator \
1168
+ will raise an error if the user provides it.
1169
+
1170
+ - ``str``: metadata should be passed to the meta-estimator with \
1171
+ this given alias instead of the original name.
1172
+
1173
+ The default (``sklearn.utils.metadata_routing.UNCHANGED``) retains the
1174
+ existing request. This allows you to change the request for some
1175
+ parameters and not others.
1176
+
1177
+ .. versionadded:: 1.3
1178
+
1179
+ .. note::
1180
+ This method is only relevant if this estimator is used as a
1181
+ sub-estimator of a meta-estimator, e.g. used inside a
1182
+ :class:`~sklearn.pipeline.Pipeline`. Otherwise it has no effect.
1183
+
1184
+ Parameters
1185
+ ----------
1186
+ """
1187
+ REQUESTER_DOC_PARAM = """ {metadata} : str, True, False, or None, \
1188
+ default=sklearn.utils.metadata_routing.UNCHANGED
1189
+ Metadata routing for ``{metadata}`` parameter in ``{method}``.
1190
+
1191
+ """
1192
+ REQUESTER_DOC_RETURN = """ Returns
1193
+ -------
1194
+ self : object
1195
+ The updated object.
1196
+ """
1197
+
1198
+
1199
+ class RequestMethod:
1200
+ """
1201
+ A descriptor for request methods.
1202
+
1203
+ .. versionadded:: 1.3
1204
+
1205
+ Parameters
1206
+ ----------
1207
+ name : str
1208
+ The name of the method for which the request function should be
1209
+ created, e.g. ``"fit"`` would create a ``set_fit_request`` function.
1210
+
1211
+ keys : list of str
1212
+ A list of strings which are accepted parameters by the created
1213
+ function, e.g. ``["sample_weight"]`` if the corresponding method
1214
+ accepts it as a metadata.
1215
+
1216
+ validate_keys : bool, default=True
1217
+ Whether to check if the requested parameters fit the actual parameters
1218
+ of the method.
1219
+
1220
+ Notes
1221
+ -----
1222
+ This class is a descriptor [1]_ and uses PEP-362 to set the signature of
1223
+ the returned function [2]_.
1224
+
1225
+ References
1226
+ ----------
1227
+ .. [1] https://docs.python.org/3/howto/descriptor.html
1228
+
1229
+ .. [2] https://www.python.org/dev/peps/pep-0362/
1230
+ """
1231
+
1232
+ def __init__(self, name, keys, validate_keys=True):
1233
+ self.name = name
1234
+ self.keys = keys
1235
+ self.validate_keys = validate_keys
1236
+
1237
+ def __get__(self, instance, owner):
1238
+ # we would want to have a method which accepts only the expected args
1239
+ def func(**kw):
1240
+ """Updates the request for provided parameters
1241
+
1242
+ This docstring is overwritten below.
1243
+ See REQUESTER_DOC for expected functionality
1244
+ """
1245
+ if not _routing_enabled():
1246
+ raise RuntimeError(
1247
+ "This method is only available when metadata routing is enabled."
1248
+ " You can enable it using"
1249
+ " sklearn.set_config(enable_metadata_routing=True)."
1250
+ )
1251
+
1252
+ if self.validate_keys and (set(kw) - set(self.keys)):
1253
+ raise TypeError(
1254
+ f"Unexpected args: {set(kw) - set(self.keys)}. Accepted arguments"
1255
+ f" are: {set(self.keys)}"
1256
+ )
1257
+
1258
+ requests = instance._get_metadata_request()
1259
+ method_metadata_request = getattr(requests, self.name)
1260
+
1261
+ for prop, alias in kw.items():
1262
+ if alias is not UNCHANGED:
1263
+ method_metadata_request.add_request(param=prop, alias=alias)
1264
+ instance._metadata_request = requests
1265
+
1266
+ return instance
1267
+
1268
+ # Now we set the relevant attributes of the function so that it seems
1269
+ # like a normal method to the end user, with known expected arguments.
1270
+ func.__name__ = f"set_{self.name}_request"
1271
+ params = [
1272
+ inspect.Parameter(
1273
+ name="self",
1274
+ kind=inspect.Parameter.POSITIONAL_OR_KEYWORD,
1275
+ annotation=owner,
1276
+ )
1277
+ ]
1278
+ params.extend(
1279
+ [
1280
+ inspect.Parameter(
1281
+ k,
1282
+ inspect.Parameter.KEYWORD_ONLY,
1283
+ default=UNCHANGED,
1284
+ annotation=Optional[Union[bool, None, str]],
1285
+ )
1286
+ for k in self.keys
1287
+ ]
1288
+ )
1289
+ func.__signature__ = inspect.Signature(
1290
+ params,
1291
+ return_annotation=owner,
1292
+ )
1293
+ doc = REQUESTER_DOC.format(method=self.name)
1294
+ for metadata in self.keys:
1295
+ doc += REQUESTER_DOC_PARAM.format(metadata=metadata, method=self.name)
1296
+ doc += REQUESTER_DOC_RETURN
1297
+ func.__doc__ = doc
1298
+ return func
1299
+
1300
+
1301
+ class _MetadataRequester:
1302
+ """Mixin class for adding metadata request functionality.
1303
+
1304
+ ``BaseEstimator`` inherits from this Mixin.
1305
+
1306
+ .. versionadded:: 1.3
1307
+ """
1308
+
1309
+ if TYPE_CHECKING: # pragma: no cover
1310
+ # This code is never run in runtime, but it's here for type checking.
1311
+ # Type checkers fail to understand that the `set_{method}_request`
1312
+ # methods are dynamically generated, and they complain that they are
1313
+ # not defined. We define them here to make type checkers happy.
1314
+ # During type checking analyzers assume this to be True.
1315
+ # The following list of defined methods mirrors the list of methods
1316
+ # in SIMPLE_METHODS.
1317
+ # fmt: off
1318
+ def set_fit_request(self, **kwargs): pass
1319
+ def set_partial_fit_request(self, **kwargs): pass
1320
+ def set_predict_request(self, **kwargs): pass
1321
+ def set_predict_proba_request(self, **kwargs): pass
1322
+ def set_predict_log_proba_request(self, **kwargs): pass
1323
+ def set_decision_function_request(self, **kwargs): pass
1324
+ def set_score_request(self, **kwargs): pass
1325
+ def set_split_request(self, **kwargs): pass
1326
+ def set_transform_request(self, **kwargs): pass
1327
+ def set_inverse_transform_request(self, **kwargs): pass
1328
+ # fmt: on
1329
+
1330
+ def __init_subclass__(cls, **kwargs):
1331
+ """Set the ``set_{method}_request`` methods.
1332
+
1333
+ This uses PEP-487 [1]_ to set the ``set_{method}_request`` methods. It
1334
+ looks for the information available in the default values, which are
1335
+ set using ``__metadata_request__*`` class attributes, or inferred
1336
+ from method signatures.
1337
+
1338
+ The ``__metadata_request__*`` class attributes are used when a method
1339
+ does not explicitly accept a metadata through its arguments or if the
1340
+ developer would like to specify a request value for those metadata
1341
+ which are different from the default ``None``.
1342
+
1343
+ References
1344
+ ----------
1345
+ .. [1] https://www.python.org/dev/peps/pep-0487
1346
+ """
1347
+ try:
1348
+ requests = cls._get_default_requests()
1349
+ except Exception:
1350
+ # if there are any issues in the default values, it will be raised
1351
+ # when ``get_metadata_routing`` is called. Here we are going to
1352
+ # ignore all the issues such as bad defaults etc.
1353
+ super().__init_subclass__(**kwargs)
1354
+ return
1355
+
1356
+ for method in SIMPLE_METHODS:
1357
+ mmr = getattr(requests, method)
1358
+ # set ``set_{method}_request``` methods
1359
+ if not len(mmr.requests):
1360
+ continue
1361
+ setattr(
1362
+ cls,
1363
+ f"set_{method}_request",
1364
+ RequestMethod(method, sorted(mmr.requests.keys())),
1365
+ )
1366
+ super().__init_subclass__(**kwargs)
1367
+
1368
+ @classmethod
1369
+ def _build_request_for_signature(cls, router, method):
1370
+ """Build the `MethodMetadataRequest` for a method using its signature.
1371
+
1372
+ This method takes all arguments from the method signature and uses
1373
+ ``None`` as their default request value, except ``X``, ``y``, ``Y``,
1374
+ ``Xt``, ``yt``, ``*args``, and ``**kwargs``.
1375
+
1376
+ Parameters
1377
+ ----------
1378
+ router : MetadataRequest
1379
+ The parent object for the created `MethodMetadataRequest`.
1380
+ method : str
1381
+ The name of the method.
1382
+
1383
+ Returns
1384
+ -------
1385
+ method_request : MethodMetadataRequest
1386
+ The prepared request using the method's signature.
1387
+ """
1388
+ mmr = MethodMetadataRequest(owner=cls.__name__, method=method)
1389
+ # Here we use `isfunction` instead of `ismethod` because calling `getattr`
1390
+ # on a class instead of an instance returns an unbound function.
1391
+ if not hasattr(cls, method) or not inspect.isfunction(getattr(cls, method)):
1392
+ return mmr
1393
+ # ignore the first parameter of the method, which is usually "self"
1394
+ params = list(inspect.signature(getattr(cls, method)).parameters.items())[1:]
1395
+ for pname, param in params:
1396
+ if pname in {"X", "y", "Y", "Xt", "yt"}:
1397
+ continue
1398
+ if param.kind in {param.VAR_POSITIONAL, param.VAR_KEYWORD}:
1399
+ continue
1400
+ mmr.add_request(
1401
+ param=pname,
1402
+ alias=None,
1403
+ )
1404
+ return mmr
1405
+
1406
+ @classmethod
1407
+ def _get_default_requests(cls):
1408
+ """Collect default request values.
1409
+
1410
+ This method combines the information present in ``__metadata_request__*``
1411
+ class attributes, as well as determining request keys from method
1412
+ signatures.
1413
+ """
1414
+ requests = MetadataRequest(owner=cls.__name__)
1415
+
1416
+ for method in SIMPLE_METHODS:
1417
+ setattr(
1418
+ requests,
1419
+ method,
1420
+ cls._build_request_for_signature(router=requests, method=method),
1421
+ )
1422
+
1423
+ # Then overwrite those defaults with the ones provided in
1424
+ # __metadata_request__* attributes. Defaults set in
1425
+ # __metadata_request__* attributes take precedence over signature
1426
+ # sniffing.
1427
+
1428
+ # need to go through the MRO since this is a class attribute and
1429
+ # ``vars`` doesn't report the parent class attributes. We go through
1430
+ # the reverse of the MRO so that child classes have precedence over
1431
+ # their parents.
1432
+ defaults = dict()
1433
+ for base_class in reversed(inspect.getmro(cls)):
1434
+ base_defaults = {
1435
+ attr: value
1436
+ for attr, value in vars(base_class).items()
1437
+ if "__metadata_request__" in attr
1438
+ }
1439
+ defaults.update(base_defaults)
1440
+ defaults = dict(sorted(defaults.items()))
1441
+
1442
+ for attr, value in defaults.items():
1443
+ # we don't check for attr.startswith() since python prefixes attrs
1444
+ # starting with __ with `_ClassName` (name mangling).
1445
+ substr = "__metadata_request__"
1446
+ method = attr[attr.index(substr) + len(substr) :]
1447
+ for prop, alias in value.items():
1448
+ getattr(requests, method).add_request(param=prop, alias=alias)
1449
+
1450
+ return requests
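A hedged sketch of the `__metadata_request__*` override mechanism that `_get_default_requests` consumes (the attribute is name-mangled by Python, which is why the lookup above searches for the substring):

```python
from sklearn.base import BaseEstimator

class MyConsumer(BaseEstimator):
    # Request sample_weight in fit by default, even though it is only
    # reachable through **kwargs and thus invisible to signature sniffing.
    __metadata_request__fit = {"sample_weight": True}

    def fit(self, X, y, **kwargs):
        return self
```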
1451
+
1452
+ def _get_metadata_request(self):
1453
+ """Get requested data properties.
1454
+
1455
+ Please check :ref:`User Guide <metadata_routing>` on how the routing
1456
+ mechanism works.
1457
+
1458
+ Returns
1459
+ -------
1460
+ request : MetadataRequest
1461
+ A :class:`~sklearn.utils.metadata_routing.MetadataRequest` instance.
1462
+ """
1463
+ if hasattr(self, "_metadata_request"):
1464
+ requests = get_routing_for_object(self._metadata_request)
1465
+ else:
1466
+ requests = self._get_default_requests()
1467
+
1468
+ return requests
1469
+
1470
+ def get_metadata_routing(self):
1471
+ """Get metadata routing of this object.
1472
+
1473
+ Please check :ref:`User Guide <metadata_routing>` on how the routing
1474
+ mechanism works.
1475
+
1476
+ Returns
1477
+ -------
1478
+ routing : MetadataRequest
1479
+ A :class:`~sklearn.utils.metadata_routing.MetadataRequest` encapsulating
1480
+ routing information.
1481
+ """
1482
+ return self._get_metadata_request()
1483
+
1484
+
1485
+ # Process Routing in Routers
1486
+ # ==========================
1487
+ # This is almost always the only method used in routers to process and route
1488
+ # given metadata. This is to minimize the boilerplate required in routers.
1489
+
1490
+
1491
+ # Here the first two arguments are positional only which makes everything
1492
+ # passed as keyword argument a metadata. The first two args also have an `_`
1493
+ # prefix to reduce the chances of name collisions with the passed metadata, and
1494
+ # since they're positional only, users will never type those underscores.
1495
+ def process_routing(_obj, _method, /, **kwargs):
1496
+ """Validate and route input parameters.
1497
+
1498
+ This function is used inside a router's method, e.g. :term:`fit`,
1499
+ to validate the metadata and handle the routing.
1500
+
1501
+ Assuming this signature: ``fit(self, X, y, sample_weight=None, **fit_params)``,
1502
+ a call to this function would be:
1503
+ ``process_routing(self, "fit", sample_weight=sample_weight, **fit_params)``.
1504
+
1505
+ Note that if routing is not enabled and ``kwargs`` is empty, then it
1506
+ returns an empty routing where ``process_routing(...).ANYTHING.ANY_METHOD``
1507
+ is always an empty dictionary.
1508
+
1509
+ .. versionadded:: 1.3
1510
+
1511
+ Parameters
1512
+ ----------
1513
+ _obj : object
1514
+ An object implementing ``get_metadata_routing``. Typically a
1515
+ meta-estimator.
1516
+
1517
+ _method : str
1518
+ The name of the router's method in which this function is called.
1519
+
1520
+ **kwargs : dict
1521
+ Metadata to be routed.
1522
+
1523
+ Returns
1524
+ -------
1525
+ routed_params : Bunch
1526
+ A :class:`~sklearn.utils.Bunch` of the form ``{"object_name": {"method_name":
1527
+ {prop: value}}}`` which can be used to pass the required metadata to
1528
+ corresponding methods or corresponding child objects. The object names
1529
+ are those defined in `obj.get_metadata_routing()`.
1530
+ """
1531
+ if not kwargs:
1532
+ # If routing is not enabled and kwargs are empty, then we don't have to
1533
+ # try doing any routing; we can simply return a structure that returns
1534
+ # an empty dict on routed_params.ANYTHING.ANY_METHOD.
1535
+ class EmptyRequest:
1536
+ def get(self, name, default=None):
1537
+ return Bunch(**{method: dict() for method in METHODS})
1538
+
1539
+ def __getitem__(self, name):
1540
+ return Bunch(**{method: dict() for method in METHODS})
1541
+
1542
+ def __getattr__(self, name):
1543
+ return Bunch(**{method: dict() for method in METHODS})
1544
+
1545
+ return EmptyRequest()
1546
+
1547
+ if not (hasattr(_obj, "get_metadata_routing") or isinstance(_obj, MetadataRouter)):
1548
+ raise AttributeError(
1549
+ f"The given object ({repr(_obj.__class__.__name__)}) needs to either"
1550
+ " implement the routing method `get_metadata_routing` or be a"
1551
+ " `MetadataRouter` instance."
1552
+ )
1553
+ if _method not in METHODS:
1554
+ raise TypeError(
1555
+ f"Can only route and process input on these methods: {METHODS}, "
1556
+ f"while the passed method is: {_method}."
1557
+ )
1558
+
1559
+ request_routing = get_routing_for_object(_obj)
1560
+ request_routing.validate_metadata(params=kwargs, method=_method)
1561
+ routed_params = request_routing.route_params(params=kwargs, caller=_method)
1562
+
1563
+ return routed_params
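A minimal usage sketch, following the call pattern in the docstring above; the meta-estimator, its `estimator` parameter, and the use of `clone` are illustrative assumptions, not part of this file:

    from sklearn.base import clone

    class MyMeta:  # hypothetical meta-estimator acting as a router
        def fit(self, X, y, sample_weight=None, **fit_params):
            # Validate the incoming metadata and split it per consumer and method.
            routed_params = process_routing(
                self, "fit", sample_weight=sample_weight, **fit_params
            )
            # Forward only what the sub-estimator requested for its own fit.
            self.estimator_ = clone(self.estimator).fit(
                X, y, **routed_params.estimator.fit
            )
            return self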
env-llmeval/lib/python3.10/site-packages/sklearn/utils/_openmp_helpers.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (80.6 kB).
 
env-llmeval/lib/python3.10/site-packages/sklearn/utils/_openmp_helpers.pxd ADDED
@@ -0,0 +1,33 @@
1
+ # Helpers to safely access OpenMP routines
2
+ #
3
+ # no-op implementations are provided for the case where OpenMP is not available.
4
+ #
5
+ # All OpenMP routines used elsewhere should be cimported from this module.
6
+
7
+ cdef extern from *:
8
+ """
9
+ #ifdef _OPENMP
10
+ #include <omp.h>
11
+ #define SKLEARN_OPENMP_PARALLELISM_ENABLED 1
12
+ #else
13
+ #define SKLEARN_OPENMP_PARALLELISM_ENABLED 0
14
+ #define omp_lock_t int
15
+ #define omp_init_lock(l) (void)0
16
+ #define omp_destroy_lock(l) (void)0
17
+ #define omp_set_lock(l) (void)0
18
+ #define omp_unset_lock(l) (void)0
19
+ #define omp_get_thread_num() 0
20
+ #define omp_get_max_threads() 1
21
+ #endif
22
+ """
23
+ bint SKLEARN_OPENMP_PARALLELISM_ENABLED
24
+
25
+ ctypedef struct omp_lock_t:
26
+ pass
27
+
28
+ void omp_init_lock(omp_lock_t*) noexcept nogil
29
+ void omp_destroy_lock(omp_lock_t*) noexcept nogil
30
+ void omp_set_lock(omp_lock_t*) noexcept nogil
31
+ void omp_unset_lock(omp_lock_t*) noexcept nogil
32
+ int omp_get_thread_num() noexcept nogil
33
+ int omp_get_max_threads() noexcept nogil
env-llmeval/lib/python3.10/site-packages/sklearn/utils/_param_validation.py ADDED
@@ -0,0 +1,905 @@
1
+ import functools
2
+ import math
3
+ import operator
4
+ import re
5
+ from abc import ABC, abstractmethod
6
+ from collections.abc import Iterable
7
+ from inspect import signature
8
+ from numbers import Integral, Real
9
+
10
+ import numpy as np
11
+ from scipy.sparse import csr_matrix, issparse
12
+
13
+ from .._config import config_context, get_config
14
+ from .validation import _is_arraylike_not_scalar
15
+
16
+
17
+ class InvalidParameterError(ValueError, TypeError):
18
+ """Custom exception to be raised when the parameter of a class/method/function
19
+ does not have a valid type or value.
20
+ """
21
+
22
+ # Inherits from ValueError and TypeError to keep backward compatibility.
23
+
24
+
25
+ def validate_parameter_constraints(parameter_constraints, params, caller_name):
26
+ """Validate types and values of given parameters.
27
+
28
+ Parameters
29
+ ----------
30
+ parameter_constraints : dict or {"no_validation"}
31
+ If "no_validation", validation is skipped for this parameter.
32
+
33
+ If a dict, it must be a dictionary `param_name: list of constraints`.
34
+ A parameter is valid if it satisfies one of the constraints from the list.
35
+ Constraints can be:
36
+ - an Interval object, representing a continuous or discrete range of numbers
37
+ - the string "array-like"
38
+ - the string "sparse matrix"
39
+ - the string "random_state"
40
+ - callable
41
+ - None, meaning that None is a valid value for the parameter
42
+ - any type, meaning that any instance of this type is valid
43
+ - an Options object, representing a set of elements of a given type
44
+ - a StrOptions object, representing a set of strings
45
+ - the string "boolean"
46
+ - the string "verbose"
47
+ - the string "cv_object"
48
+ - the string "nan"
49
+ - a MissingValues object representing markers for missing values
50
+ - a HasMethods object, representing method(s) an object must have
51
+ - a Hidden object, representing a constraint not meant to be exposed to the user
52
+
53
+ params : dict
54
+ A dictionary `param_name: param_value`. The parameters to validate against the
55
+ constraints.
56
+
57
+ caller_name : str
58
+ The name of the estimator or function or method that called this function.
59
+ """
60
+ for param_name, param_val in params.items():
61
+ # We allow parameters to not have a constraint so that third party estimators
62
+ # can inherit from sklearn estimators without having to necessarily use the
63
+ # validation tools.
64
+ if param_name not in parameter_constraints:
65
+ continue
66
+
67
+ constraints = parameter_constraints[param_name]
68
+
69
+ if constraints == "no_validation":
70
+ continue
71
+
72
+ constraints = [make_constraint(constraint) for constraint in constraints]
73
+
74
+ for constraint in constraints:
75
+ if constraint.is_satisfied_by(param_val):
76
+ # this constraint is satisfied, no need to check further.
77
+ break
78
+ else:
79
+ # No constraint is satisfied, raise with an informative message.
80
+
81
+ # Ignore constraints that we don't want to expose in the error message,
82
+ # i.e. options that are for internal purpose or not officially supported.
83
+ constraints = [
84
+ constraint for constraint in constraints if not constraint.hidden
85
+ ]
86
+
87
+ if len(constraints) == 1:
88
+ constraints_str = f"{constraints[0]}"
89
+ else:
90
+ constraints_str = (
91
+ f"{', '.join([str(c) for c in constraints[:-1]])} or"
92
+ f" {constraints[-1]}"
93
+ )
94
+
95
+ raise InvalidParameterError(
96
+ f"The {param_name!r} parameter of {caller_name} must be"
97
+ f" {constraints_str}. Got {param_val!r} instead."
98
+ )
99
+
100
+
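A small illustrative call, assuming a hypothetical `alpha` parameter of a hypothetical `MyEstimator`; a value outside the interval would raise `InvalidParameterError` with the message assembled above:

    from numbers import Real

    validate_parameter_constraints(
        {"alpha": [Interval(Real, 0, None, closed="left")]},  # Interval is defined below
        params={"alpha": 0.5},       # in range: passes silently
        caller_name="MyEstimator",   # hypothetical caller name used in the message
    )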
101
+ def make_constraint(constraint):
102
+ """Convert the constraint into the appropriate Constraint object.
103
+
104
+ Parameters
105
+ ----------
106
+ constraint : object
107
+ The constraint to convert.
108
+
109
+ Returns
110
+ -------
111
+ constraint : instance of _Constraint
112
+ The converted constraint.
113
+ """
114
+ if isinstance(constraint, str) and constraint == "array-like":
115
+ return _ArrayLikes()
116
+ if isinstance(constraint, str) and constraint == "sparse matrix":
117
+ return _SparseMatrices()
118
+ if isinstance(constraint, str) and constraint == "random_state":
119
+ return _RandomStates()
120
+ if constraint is callable:
121
+ return _Callables()
122
+ if constraint is None:
123
+ return _NoneConstraint()
124
+ if isinstance(constraint, type):
125
+ return _InstancesOf(constraint)
126
+ if isinstance(
127
+ constraint, (Interval, StrOptions, Options, HasMethods, MissingValues)
128
+ ):
129
+ return constraint
130
+ if isinstance(constraint, str) and constraint == "boolean":
131
+ return _Booleans()
132
+ if isinstance(constraint, str) and constraint == "verbose":
133
+ return _VerboseHelper()
134
+ if isinstance(constraint, str) and constraint == "cv_object":
135
+ return _CVObjects()
136
+ if isinstance(constraint, Hidden):
137
+ constraint = make_constraint(constraint.constraint)
138
+ constraint.hidden = True
139
+ return constraint
140
+ if isinstance(constraint, str) and constraint == "nan":
141
+ return _NanConstraint()
142
+ raise ValueError(f"Unknown constraint type: {constraint}")
143
+
144
+
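To make the mapping concrete, a short sketch of the shorthand-to-object conversion performed above:

    assert isinstance(make_constraint("array-like"), _ArrayLikes)
    assert isinstance(make_constraint("boolean"), _Booleans)
    assert isinstance(make_constraint(None), _NoneConstraint)
    assert isinstance(make_constraint(int), _InstancesOf)  # any type is wrapped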
145
+ def validate_params(parameter_constraints, *, prefer_skip_nested_validation):
146
+ """Decorator to validate types and values of functions and methods.
147
+
148
+ Parameters
149
+ ----------
150
+ parameter_constraints : dict
151
+ A dictionary `param_name: list of constraints`. See the docstring of
152
+ `validate_parameter_constraints` for a description of the accepted constraints.
153
+
154
+ Note that the *args and **kwargs parameters are not validated and must not be
155
+ present in the parameter_constraints dictionary.
156
+
157
+ prefer_skip_nested_validation : bool
158
+ If True, the validation of parameters of inner estimators or functions
159
+ called by the decorated function will be skipped.
160
+
161
+ This is useful to avoid validating many times the parameters passed by the
162
+ user from the public facing API. It's also useful to avoid validating
163
+ parameters that we pass internally to inner functions that are guaranteed to
164
+ be valid by the test suite.
165
+
166
+ It should be set to True for most functions, except for those that receive
167
+ non-validated objects as parameters or that are just wrappers around classes
168
+ because they only perform a partial validation.
169
+
170
+ Returns
171
+ -------
172
+ decorated_function : function or method
173
+ The decorated function.
174
+ """
175
+
176
+ def decorator(func):
177
+ # The dict of parameter constraints is set as an attribute of the function
178
+ # to make it possible to dynamically introspect the constraints for
179
+ # automatic testing.
180
+ setattr(func, "_skl_parameter_constraints", parameter_constraints)
181
+
182
+ @functools.wraps(func)
183
+ def wrapper(*args, **kwargs):
184
+ global_skip_validation = get_config()["skip_parameter_validation"]
185
+ if global_skip_validation:
186
+ return func(*args, **kwargs)
187
+
188
+ func_sig = signature(func)
189
+
190
+ # Map *args/**kwargs to the function signature
191
+ params = func_sig.bind(*args, **kwargs)
192
+ params.apply_defaults()
193
+
194
+ # ignore self/cls and positional/keyword markers
195
+ to_ignore = [
196
+ p.name
197
+ for p in func_sig.parameters.values()
198
+ if p.kind in (p.VAR_POSITIONAL, p.VAR_KEYWORD)
199
+ ]
200
+ to_ignore += ["self", "cls"]
201
+ params = {k: v for k, v in params.arguments.items() if k not in to_ignore}
202
+
203
+ validate_parameter_constraints(
204
+ parameter_constraints, params, caller_name=func.__qualname__
205
+ )
206
+
207
+ try:
208
+ with config_context(
209
+ skip_parameter_validation=(
210
+ prefer_skip_nested_validation or global_skip_validation
211
+ )
212
+ ):
213
+ return func(*args, **kwargs)
214
+ except InvalidParameterError as e:
215
+ # When the function is just a wrapper around an estimator, we allow
216
+ # the function to delegate validation to the estimator, but we replace
217
+ # the name of the estimator by the name of the function in the error
218
+ # message to avoid confusion.
219
+ msg = re.sub(
220
+ r"parameter of \w+ must be",
221
+ f"parameter of {func.__qualname__} must be",
222
+ str(e),
223
+ )
224
+ raise InvalidParameterError(msg) from e
225
+
226
+ return wrapper
227
+
228
+ return decorator
229
+
230
+
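A minimal decorating sketch; `halve` is a hypothetical function, not scikit-learn API:

    from numbers import Real

    @validate_params(
        {"x": [Interval(Real, 0, None, closed="left")]},
        prefer_skip_nested_validation=True,
    )
    def halve(x):
        return x / 2

    halve(4.0)   # returns 2.0
    halve(-1.0)  # raises InvalidParameterError naming 'x' and the allowed range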
231
+ class RealNotInt(Real):
232
+ """A type that represents reals that are not instances of int.
233
+
234
+ Behaves like float, but also works with values extracted from numpy arrays.
235
+ isintance(1, RealNotInt) -> False
236
+ isinstance(1.0, RealNotInt) -> True
237
+ """
238
+
239
+
240
+ RealNotInt.register(float)
241
+
242
+
243
+ def _type_name(t):
244
+ """Convert type into human readable string."""
245
+ module = t.__module__
246
+ qualname = t.__qualname__
247
+ if module == "builtins":
248
+ return qualname
249
+ elif t == Real:
250
+ return "float"
251
+ elif t == Integral:
252
+ return "int"
253
+ return f"{module}.{qualname}"
254
+
255
+
256
+ class _Constraint(ABC):
257
+ """Base class for the constraint objects."""
258
+
259
+ def __init__(self):
260
+ self.hidden = False
261
+
262
+ @abstractmethod
263
+ def is_satisfied_by(self, val):
264
+ """Whether or not a value satisfies the constraint.
265
+
266
+ Parameters
267
+ ----------
268
+ val : object
269
+ The value to check.
270
+
271
+ Returns
272
+ -------
273
+ is_satisfied : bool
274
+ Whether or not the constraint is satisfied by this value.
275
+ """
276
+
277
+ @abstractmethod
278
+ def __str__(self):
279
+ """A human readable representational string of the constraint."""
280
+
281
+
282
+ class _InstancesOf(_Constraint):
283
+ """Constraint representing instances of a given type.
284
+
285
+ Parameters
286
+ ----------
287
+ type : type
288
+ The valid type.
289
+ """
290
+
291
+ def __init__(self, type):
292
+ super().__init__()
293
+ self.type = type
294
+
295
+ def is_satisfied_by(self, val):
296
+ return isinstance(val, self.type)
297
+
298
+ def __str__(self):
299
+ return f"an instance of {_type_name(self.type)!r}"
300
+
301
+
302
+ class _NoneConstraint(_Constraint):
303
+ """Constraint representing the None singleton."""
304
+
305
+ def is_satisfied_by(self, val):
306
+ return val is None
307
+
308
+ def __str__(self):
309
+ return "None"
310
+
311
+
312
+ class _NanConstraint(_Constraint):
313
+ """Constraint representing the indicator `np.nan`."""
314
+
315
+ def is_satisfied_by(self, val):
316
+ return (
317
+ not isinstance(val, Integral) and isinstance(val, Real) and math.isnan(val)
318
+ )
319
+
320
+ def __str__(self):
321
+ return "numpy.nan"
322
+
323
+
324
+ class _PandasNAConstraint(_Constraint):
325
+ """Constraint representing the indicator `pd.NA`."""
326
+
327
+ def is_satisfied_by(self, val):
328
+ try:
329
+ import pandas as pd
330
+
331
+ return isinstance(val, type(pd.NA)) and pd.isna(val)
332
+ except ImportError:
333
+ return False
334
+
335
+ def __str__(self):
336
+ return "pandas.NA"
337
+
338
+
339
+ class Options(_Constraint):
340
+ """Constraint representing a finite set of instances of a given type.
341
+
342
+ Parameters
343
+ ----------
344
+ type : type
345
+
346
+ options : set
347
+ The set of valid scalars.
348
+
349
+ deprecated : set or None, default=None
350
+ A subset of the `options` to mark as deprecated in the string
351
+ representation of the constraint.
352
+ """
353
+
354
+ def __init__(self, type, options, *, deprecated=None):
355
+ super().__init__()
356
+ self.type = type
357
+ self.options = options
358
+ self.deprecated = deprecated or set()
359
+
360
+ if self.deprecated - self.options:
361
+ raise ValueError("The deprecated options must be a subset of the options.")
362
+
363
+ def is_satisfied_by(self, val):
364
+ return isinstance(val, self.type) and val in self.options
365
+
366
+ def _mark_if_deprecated(self, option):
367
+ """Add a deprecated mark to an option if needed."""
368
+ option_str = f"{option!r}"
369
+ if option in self.deprecated:
370
+ option_str = f"{option_str} (deprecated)"
371
+ return option_str
372
+
373
+ def __str__(self):
374
+ options_str = (
375
+ f"{', '.join([self._mark_if_deprecated(o) for o in self.options])}"
376
+ )
377
+ return f"a {_type_name(self.type)} among {{{options_str}}}"
378
+
379
+
380
+ class StrOptions(Options):
381
+ """Constraint representing a finite set of strings.
382
+
383
+ Parameters
384
+ ----------
385
+ options : set of str
386
+ The set of valid strings.
387
+
388
+ deprecated : set of str or None, default=None
389
+ A subset of the `options` to mark as deprecated in the string
390
+ representation of the constraint.
391
+ """
392
+
393
+ def __init__(self, options, *, deprecated=None):
394
+ super().__init__(type=str, options=options, deprecated=deprecated)
395
+
396
+
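An illustrative instance; the option names are made up:

    solver = StrOptions({"lbfgs", "saga"}, deprecated={"saga"})
    assert solver.is_satisfied_by("lbfgs")
    assert not solver.is_satisfied_by("adam")
    print(solver)  # e.g. "a str among {'lbfgs', 'saga' (deprecated)}" (set order varies)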
397
+ class Interval(_Constraint):
398
+ """Constraint representing a typed interval.
399
+
400
+ Parameters
401
+ ----------
402
+ type : {numbers.Integral, numbers.Real, RealNotInt}
403
+ The set of numbers in which to set the interval.
404
+
405
+ If RealNotInt, only reals that don't have the integer type
406
+ are allowed. For example 1.0 is allowed but 1 is not.
407
+
408
+ left : float or int or None
409
+ The left bound of the interval. None means left bound is -∞.
410
+
411
+ right : float, int or None
412
+ The right bound of the interval. None means right bound is +∞.
413
+
414
+ closed : {"left", "right", "both", "neither"}
415
+ Whether the interval is open or closed. Possible choices are:
416
+
417
+ - `"left"`: the interval is closed on the left and open on the right.
418
+ It is equivalent to the interval `[ left, right )`.
419
+ - `"right"`: the interval is closed on the right and open on the left.
420
+ It is equivalent to the interval `( left, right ]`.
421
+ - `"both"`: the interval is closed.
422
+ It is equivalent to the interval `[ left, right ]`.
423
+ - `"neither"`: the interval is open.
424
+ It is equivalent to the interval `( left, right )`.
425
+
426
+ Notes
427
+ -----
428
+ Setting a bound to `None` and setting the interval closed is valid. For instance,
429
+ strictly speaking, `Interval(Real, 0, None, closed="both")` corresponds to
430
+ `[0, +∞) U {+∞}`.
431
+ """
432
+
433
+ def __init__(self, type, left, right, *, closed):
434
+ super().__init__()
435
+ self.type = type
436
+ self.left = left
437
+ self.right = right
438
+ self.closed = closed
439
+
440
+ self._check_params()
441
+
442
+ def _check_params(self):
443
+ if self.type not in (Integral, Real, RealNotInt):
444
+ raise ValueError(
445
+ "type must be either numbers.Integral, numbers.Real or RealNotInt."
446
+ f" Got {self.type} instead."
447
+ )
448
+
449
+ if self.closed not in ("left", "right", "both", "neither"):
450
+ raise ValueError(
451
+ "closed must be either 'left', 'right', 'both' or 'neither'. "
452
+ f"Got {self.closed} instead."
453
+ )
454
+
455
+ if self.type is Integral:
456
+ suffix = "for an interval over the integers."
457
+ if self.left is not None and not isinstance(self.left, Integral):
458
+ raise TypeError(f"Expecting left to be an int {suffix}")
459
+ if self.right is not None and not isinstance(self.right, Integral):
460
+ raise TypeError(f"Expecting right to be an int {suffix}")
461
+ if self.left is None and self.closed in ("left", "both"):
462
+ raise ValueError(
463
+ f"left can't be None when closed == {self.closed} {suffix}"
464
+ )
465
+ if self.right is None and self.closed in ("right", "both"):
466
+ raise ValueError(
467
+ f"right can't be None when closed == {self.closed} {suffix}"
468
+ )
469
+ else:
470
+ if self.left is not None and not isinstance(self.left, Real):
471
+ raise TypeError("Expecting left to be a real number.")
472
+ if self.right is not None and not isinstance(self.right, Real):
473
+ raise TypeError("Expecting right to be a real number.")
474
+
475
+ if self.right is not None and self.left is not None and self.right <= self.left:
476
+ raise ValueError(
477
+ f"right can't be less than left. Got left={self.left} and "
478
+ f"right={self.right}"
479
+ )
480
+
481
+ def __contains__(self, val):
482
+ if not isinstance(val, Integral) and np.isnan(val):
483
+ return False
484
+
485
+ left_cmp = operator.lt if self.closed in ("left", "both") else operator.le
486
+ right_cmp = operator.gt if self.closed in ("right", "both") else operator.ge
487
+
488
+ left = -np.inf if self.left is None else self.left
489
+ right = np.inf if self.right is None else self.right
490
+
491
+ if left_cmp(val, left):
492
+ return False
493
+ if right_cmp(val, right):
494
+ return False
495
+ return True
496
+
497
+ def is_satisfied_by(self, val):
498
+ if not isinstance(val, self.type):
499
+ return False
500
+
501
+ return val in self
502
+
503
+ def __str__(self):
504
+ type_str = "an int" if self.type is Integral else "a float"
505
+ left_bracket = "[" if self.closed in ("left", "both") else "("
506
+ left_bound = "-inf" if self.left is None else self.left
507
+ right_bound = "inf" if self.right is None else self.right
508
+ right_bracket = "]" if self.closed in ("right", "both") else ")"
509
+
510
+ # better repr if the bounds were given as integers
511
+ if not self.type == Integral and isinstance(self.left, Real):
512
+ left_bound = float(left_bound)
513
+ if not self.type == Integral and isinstance(self.right, Real):
514
+ right_bound = float(right_bound)
515
+
516
+ return (
517
+ f"{type_str} in the range "
518
+ f"{left_bracket}{left_bound}, {right_bound}{right_bracket}"
519
+ )
520
+
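A quick behavioral sketch of the interval semantics described above:

    from numbers import Real

    unit = Interval(Real, 0, 1, closed="left")  # [0, 1)
    assert unit.is_satisfied_by(0.0)
    assert not unit.is_satisfied_by(1.0)
    # RealNotInt rejects plain ints even when the value lies inside the range:
    assert not Interval(RealNotInt, 0, 1, closed="both").is_satisfied_by(1)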
521
+
522
+ class _ArrayLikes(_Constraint):
523
+ """Constraint representing array-likes"""
524
+
525
+ def is_satisfied_by(self, val):
526
+ return _is_arraylike_not_scalar(val)
527
+
528
+ def __str__(self):
529
+ return "an array-like"
530
+
531
+
532
+ class _SparseMatrices(_Constraint):
533
+ """Constraint representing sparse matrices."""
534
+
535
+ def is_satisfied_by(self, val):
536
+ return issparse(val)
537
+
538
+ def __str__(self):
539
+ return "a sparse matrix"
540
+
541
+
542
+ class _Callables(_Constraint):
543
+ """Constraint representing callables."""
544
+
545
+ def is_satisfied_by(self, val):
546
+ return callable(val)
547
+
548
+ def __str__(self):
549
+ return "a callable"
550
+
551
+
552
+ class _RandomStates(_Constraint):
553
+ """Constraint representing random states.
554
+
555
+ Convenience class for
556
+ [Interval(Integral, 0, 2**32 - 1, closed="both"), np.random.RandomState, None]
557
+ """
558
+
559
+ def __init__(self):
560
+ super().__init__()
561
+ self._constraints = [
562
+ Interval(Integral, 0, 2**32 - 1, closed="both"),
563
+ _InstancesOf(np.random.RandomState),
564
+ _NoneConstraint(),
565
+ ]
566
+
567
+ def is_satisfied_by(self, val):
568
+ return any(c.is_satisfied_by(val) for c in self._constraints)
569
+
570
+ def __str__(self):
571
+ return (
572
+ f"{', '.join([str(c) for c in self._constraints[:-1]])} or"
573
+ f" {self._constraints[-1]}"
574
+ )
575
+
576
+
577
+ class _Booleans(_Constraint):
578
+ """Constraint representing boolean likes.
579
+
580
+ Convenience class for
581
+ [bool, np.bool_, Integral (deprecated)]
582
+ """
583
+
584
+ def __init__(self):
585
+ super().__init__()
586
+ self._constraints = [
587
+ _InstancesOf(bool),
588
+ _InstancesOf(np.bool_),
589
+ ]
590
+
591
+ def is_satisfied_by(self, val):
592
+ return any(c.is_satisfied_by(val) for c in self._constraints)
593
+
594
+ def __str__(self):
595
+ return (
596
+ f"{', '.join([str(c) for c in self._constraints[:-1]])} or"
597
+ f" {self._constraints[-1]}"
598
+ )
599
+
600
+
601
+ class _VerboseHelper(_Constraint):
602
+ """Helper constraint for the verbose parameter.
603
+
604
+ Convenience class for
605
+ [Interval(Integral, 0, None, closed="left"), bool, numpy.bool_]
606
+ """
607
+
608
+ def __init__(self):
609
+ super().__init__()
610
+ self._constraints = [
611
+ Interval(Integral, 0, None, closed="left"),
612
+ _InstancesOf(bool),
613
+ _InstancesOf(np.bool_),
614
+ ]
615
+
616
+ def is_satisfied_by(self, val):
617
+ return any(c.is_satisfied_by(val) for c in self._constraints)
618
+
619
+ def __str__(self):
620
+ return (
621
+ f"{', '.join([str(c) for c in self._constraints[:-1]])} or"
622
+ f" {self._constraints[-1]}"
623
+ )
624
+
625
+
626
+ class MissingValues(_Constraint):
627
+ """Helper constraint for the `missing_values` parameters.
628
+
629
+ Convenience for
630
+ [
631
+ Integral,
632
+ Interval(Real, None, None, closed="both"),
633
+ str, # when numeric_only is False
634
+ None, # when numeric_only is False
635
+ _NanConstraint(),
636
+ _PandasNAConstraint(),
637
+ ]
638
+
639
+ Parameters
640
+ ----------
641
+ numeric_only : bool, default=False
642
+ Whether to consider only numeric missing value markers.
643
+
644
+ """
645
+
646
+ def __init__(self, numeric_only=False):
647
+ super().__init__()
648
+
649
+ self.numeric_only = numeric_only
650
+
651
+ self._constraints = [
652
+ _InstancesOf(Integral),
653
+ # we use an interval of Real to ignore np.nan that has its own constraint
654
+ Interval(Real, None, None, closed="both"),
655
+ _NanConstraint(),
656
+ _PandasNAConstraint(),
657
+ ]
658
+ if not self.numeric_only:
659
+ self._constraints.extend([_InstancesOf(str), _NoneConstraint()])
660
+
661
+ def is_satisfied_by(self, val):
662
+ return any(c.is_satisfied_by(val) for c in self._constraints)
663
+
664
+ def __str__(self):
665
+ return (
666
+ f"{', '.join([str(c) for c in self._constraints[:-1]])} or"
667
+ f" {self._constraints[-1]}"
668
+ )
669
+
670
+
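A short sketch of the `numeric_only` switch:

    import numpy as np

    assert MissingValues(numeric_only=True).is_satisfied_by(np.nan)
    assert not MissingValues(numeric_only=True).is_satisfied_by("missing")
    assert MissingValues(numeric_only=False).is_satisfied_by("missing")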
671
+ class HasMethods(_Constraint):
672
+ """Constraint representing objects that expose specific methods.
673
+
674
+ It is useful for parameters following a protocol and where we don't want to impose
675
+ an affiliation to a specific module or class.
676
+
677
+ Parameters
678
+ ----------
679
+ methods : str or list of str
680
+ The method(s) that the object is expected to expose.
681
+ """
682
+
683
+ @validate_params(
684
+ {"methods": [str, list]},
685
+ prefer_skip_nested_validation=True,
686
+ )
687
+ def __init__(self, methods):
688
+ super().__init__()
689
+ if isinstance(methods, str):
690
+ methods = [methods]
691
+ self.methods = methods
692
+
693
+ def is_satisfied_by(self, val):
694
+ return all(callable(getattr(val, method, None)) for method in self.methods)
695
+
696
+ def __str__(self):
697
+ if len(self.methods) == 1:
698
+ methods = f"{self.methods[0]!r}"
699
+ else:
700
+ methods = (
701
+ f"{', '.join([repr(m) for m in self.methods[:-1]])} and"
702
+ f" {self.methods[-1]!r}"
703
+ )
704
+ return f"an object implementing {methods}"
705
+
706
+
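Since the check is purely structural (duck-typed), any object exposing callable attributes with the right names passes; `Duck` is hypothetical:

    class Duck:
        def fit(self, X, y): ...
        def predict(self, X): ...

    assert HasMethods(["fit", "predict"]).is_satisfied_by(Duck())
    assert not HasMethods("transform").is_satisfied_by(Duck())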
707
+ class _IterablesNotString(_Constraint):
708
+ """Constraint representing iterables that are not strings."""
709
+
710
+ def is_satisfied_by(self, val):
711
+ return isinstance(val, Iterable) and not isinstance(val, str)
712
+
713
+ def __str__(self):
714
+ return "an iterable"
715
+
716
+
717
+ class _CVObjects(_Constraint):
718
+ """Constraint representing cv objects.
719
+
720
+ Convenient class for
721
+ [
722
+ Interval(Integral, 2, None, closed="left"),
723
+ HasMethods(["split", "get_n_splits"]),
724
+ _IterablesNotString(),
725
+ None,
726
+ ]
727
+ """
728
+
729
+ def __init__(self):
730
+ super().__init__()
731
+ self._constraints = [
732
+ Interval(Integral, 2, None, closed="left"),
733
+ HasMethods(["split", "get_n_splits"]),
734
+ _IterablesNotString(),
735
+ _NoneConstraint(),
736
+ ]
737
+
738
+ def is_satisfied_by(self, val):
739
+ return any(c.is_satisfied_by(val) for c in self._constraints)
740
+
741
+ def __str__(self):
742
+ return (
743
+ f"{', '.join([str(c) for c in self._constraints[:-1]])} or"
744
+ f" {self._constraints[-1]}"
745
+ )
746
+
747
+
748
+ class Hidden:
749
+ """Class encapsulating a constraint not meant to be exposed to the user.
750
+
751
+ Parameters
752
+ ----------
753
+ constraint : str or _Constraint instance
754
+ The constraint to be used internally.
755
+ """
756
+
757
+ def __init__(self, constraint):
758
+ self.constraint = constraint
759
+
760
+
761
+ def generate_invalid_param_val(constraint):
762
+ """Return a value that does not satisfy the constraint.
763
+
764
+ Raises a NotImplementedError if there exists no invalid value for this constraint.
765
+
766
+ This is only useful for testing purposes.
767
+
768
+ Parameters
769
+ ----------
770
+ constraint : _Constraint instance
771
+ The constraint to generate a value for.
772
+
773
+ Returns
774
+ -------
775
+ val : object
776
+ A value that does not satisfy the constraint.
777
+ """
778
+ if isinstance(constraint, StrOptions):
779
+ return f"not {' or '.join(constraint.options)}"
780
+
781
+ if isinstance(constraint, MissingValues):
782
+ return np.array([1, 2, 3])
783
+
784
+ if isinstance(constraint, _VerboseHelper):
785
+ return -1
786
+
787
+ if isinstance(constraint, HasMethods):
788
+ return type("HasNotMethods", (), {})()
789
+
790
+ if isinstance(constraint, _IterablesNotString):
791
+ return "a string"
792
+
793
+ if isinstance(constraint, _CVObjects):
794
+ return "not a cv object"
795
+
796
+ if isinstance(constraint, Interval) and constraint.type is Integral:
797
+ if constraint.left is not None:
798
+ return constraint.left - 1
799
+ if constraint.right is not None:
800
+ return constraint.right + 1
801
+
802
+ # There's no integer outside (-inf, +inf)
803
+ raise NotImplementedError
804
+
805
+ if isinstance(constraint, Interval) and constraint.type in (Real, RealNotInt):
806
+ if constraint.left is not None:
807
+ return constraint.left - 1e-6
808
+ if constraint.right is not None:
809
+ return constraint.right + 1e-6
810
+
811
+ # bounds are -inf, +inf
812
+ if constraint.closed in ("right", "neither"):
813
+ return -np.inf
814
+ if constraint.closed in ("left", "neither"):
815
+ return np.inf
816
+
817
+ # interval is [-inf, +inf]
818
+ return np.nan
819
+
820
+ raise NotImplementedError
821
+
822
+
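A concrete sketch of the helper above:

    from numbers import Integral

    bad = generate_invalid_param_val(Interval(Integral, 0, None, closed="left"))
    assert bad == -1  # one step below the left bound, so validation must reject it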
823
+ def generate_valid_param(constraint):
824
+ """Return a value that does satisfy a constraint.
825
+
826
+ This is only useful for testing purposes.
827
+
828
+ Parameters
829
+ ----------
830
+ constraint : Constraint instance
831
+ The constraint to generate a value for.
832
+
833
+ Returns
834
+ -------
835
+ val : object
836
+ A value that does satisfy the constraint.
837
+ """
838
+ if isinstance(constraint, _ArrayLikes):
839
+ return np.array([1, 2, 3])
840
+
841
+ if isinstance(constraint, _SparseMatrices):
842
+ return csr_matrix([[0, 1], [1, 0]])
843
+
844
+ if isinstance(constraint, _RandomStates):
845
+ return np.random.RandomState(42)
846
+
847
+ if isinstance(constraint, _Callables):
848
+ return lambda x: x
849
+
850
+ if isinstance(constraint, _NoneConstraint):
851
+ return None
852
+
853
+ if isinstance(constraint, _InstancesOf):
854
+ if constraint.type is np.ndarray:
855
+ # special case for ndarray since it can't be instantiated without arguments
856
+ return np.array([1, 2, 3])
857
+
858
+ if constraint.type in (Integral, Real):
859
+ # special case for Integral and Real since they are abstract classes
860
+ return 1
861
+
862
+ return constraint.type()
863
+
864
+ if isinstance(constraint, _Booleans):
865
+ return True
866
+
867
+ if isinstance(constraint, _VerboseHelper):
868
+ return 1
869
+
870
+ if isinstance(constraint, MissingValues) and constraint.numeric_only:
871
+ return np.nan
872
+
873
+ if isinstance(constraint, MissingValues) and not constraint.numeric_only:
874
+ return "missing"
875
+
876
+ if isinstance(constraint, HasMethods):
877
+ return type(
878
+ "ValidHasMethods", (), {m: lambda self: None for m in constraint.methods}
879
+ )()
880
+
881
+ if isinstance(constraint, _IterablesNotString):
882
+ return [1, 2, 3]
883
+
884
+ if isinstance(constraint, _CVObjects):
885
+ return 5
886
+
887
+ if isinstance(constraint, Options): # includes StrOptions
888
+ for option in constraint.options:
889
+ return option
890
+
891
+ if isinstance(constraint, Interval):
892
+ interval = constraint
893
+ if interval.left is None and interval.right is None:
894
+ return 0
895
+ elif interval.left is None:
896
+ return interval.right - 1
897
+ elif interval.right is None:
898
+ return interval.left + 1
899
+ else:
900
+ if interval.type is Real:
901
+ return (interval.left + interval.right) / 2
902
+ else:
903
+ return interval.left + 1
904
+
905
+ raise ValueError(f"Unknown constraint type: {constraint}")
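And its counterpart, sketched on a bounded real interval:

    from numbers import Real

    val = generate_valid_param(Interval(Real, 0, 1, closed="both"))
    assert val == 0.5  # midpoint of a bounded real interval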