diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f3db2c9f16a080a731156f5adafbe860f31408be
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/__pycache__/__init__.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/__pycache__/calibration.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/__pycache__/calibration.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ee9134b1e4009e4a4ceff910f3dad0359d4e34a0
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/__pycache__/calibration.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/__pycache__/discriminant_analysis.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/__pycache__/discriminant_analysis.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..92b0d3b5cc2d5a75066dc7d1a1a2b708355cf1fa
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/__pycache__/discriminant_analysis.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/__pycache__/dummy.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/__pycache__/dummy.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..351cbea0d4dac05457891a062ac42049b498a9ac
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/__pycache__/dummy.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/__pycache__/isotonic.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/__pycache__/isotonic.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8ef545a60eda2dc1c5a2043733d1054ab9fc71c2
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/__pycache__/isotonic.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/__pycache__/kernel_ridge.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/__pycache__/kernel_ridge.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c72ac9fb9d4a4518a537530c8b630df3e116b8a1
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/__pycache__/kernel_ridge.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/__pycache__/multiclass.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/__pycache__/multiclass.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6adc45533deb8fa6bf5bd85b9279c2d0f80a779c
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/__pycache__/multiclass.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/__pycache__/multioutput.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/__pycache__/multioutput.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f77423597af658ecdabceefd7bdb7ce3037d98f7
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/__pycache__/multioutput.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/__pycache__/pipeline.cpython-310.pyc
b/env-llmeval/lib/python3.10/site-packages/sklearn/__pycache__/pipeline.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0083c82b35e53cf415dad7045c8140aad0bf6939 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/__pycache__/pipeline.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/__pycache__/random_projection.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/__pycache__/random_projection.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d759acb7f7122d8d74208c20f023029b7f3e1984 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/__pycache__/random_projection.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/__init__.py b/env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d7d316d95ada4dcc0534851b08b221ae3270174d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/__init__.py @@ -0,0 +1,88 @@ +import typing + +from ._plot import LearningCurveDisplay, ValidationCurveDisplay +from ._search import GridSearchCV, ParameterGrid, ParameterSampler, RandomizedSearchCV +from ._split import ( + BaseCrossValidator, + BaseShuffleSplit, + GroupKFold, + GroupShuffleSplit, + KFold, + LeaveOneGroupOut, + LeaveOneOut, + LeavePGroupsOut, + LeavePOut, + PredefinedSplit, + RepeatedKFold, + RepeatedStratifiedKFold, + ShuffleSplit, + StratifiedGroupKFold, + StratifiedKFold, + StratifiedShuffleSplit, + TimeSeriesSplit, + check_cv, + train_test_split, +) +from ._validation import ( + cross_val_predict, + cross_val_score, + cross_validate, + learning_curve, + permutation_test_score, + validation_curve, +) + +if typing.TYPE_CHECKING: + # Avoid errors in type checkers (e.g. mypy) for experimental estimators. + # TODO: remove this check once the estimator is no longer experimental. + from ._search_successive_halving import ( # noqa + HalvingGridSearchCV, + HalvingRandomSearchCV, + ) + + +__all__ = [ + "BaseCrossValidator", + "BaseShuffleSplit", + "GridSearchCV", + "TimeSeriesSplit", + "KFold", + "GroupKFold", + "GroupShuffleSplit", + "LeaveOneGroupOut", + "LeaveOneOut", + "LeavePGroupsOut", + "LeavePOut", + "RepeatedKFold", + "RepeatedStratifiedKFold", + "ParameterGrid", + "ParameterSampler", + "PredefinedSplit", + "RandomizedSearchCV", + "ShuffleSplit", + "StratifiedKFold", + "StratifiedGroupKFold", + "StratifiedShuffleSplit", + "check_cv", + "cross_val_predict", + "cross_val_score", + "cross_validate", + "learning_curve", + "LearningCurveDisplay", + "permutation_test_score", + "train_test_split", + "validation_curve", + "ValidationCurveDisplay", +] + + +# TODO: remove this check once the estimator is no longer experimental. +def __getattr__(name): + if name in {"HalvingGridSearchCV", "HalvingRandomSearchCV"}: + raise ImportError( + f"{name} is experimental and the API might change without any " + "deprecation cycle. 
To use it, you need to explicitly import " + "enable_halving_search_cv:\n" + "from sklearn.experimental import enable_halving_search_cv" + ) + raise AttributeError(f"module {__name__} has no attribute {name}") diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a64c34a512a28ed441f3dfff9e2b0506956a8682 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/_plot.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/_plot.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..653c16b1ee2c65f28c6f44a965b68bc794deb9a0 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/_plot.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/_search.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/_search.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b3401f3f92abcfe26265b4f4136fb457b2edf30e Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/_search.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/_search_successive_halving.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/_search_successive_halving.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..96872e2b49463f6faf12b19ffe75aba7f8558a77 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/_search_successive_halving.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/_split.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/_split.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..929c62e45d71584976781ce6e5e923ce9c79b712 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/_split.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/_validation.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/_validation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..693668e59acf1bd703006d682a63f9003e0cc4ee Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/__pycache__/_validation.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/_plot.py b/env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/_plot.py new file mode 100644 index 0000000000000000000000000000000000000000..741c893ae2ea96db7dedc015afeb0b8d7cc9178a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/_plot.py @@ -0,0 +1,907 @@ +import warnings + +import numpy as np + +from ..utils import check_matplotlib_support +from ..utils._plotting 
import _interval_max_min_ratio, _validate_score_name +from ._validation import learning_curve, validation_curve + + +class _BaseCurveDisplay: + def _plot_curve( + self, + x_data, + *, + ax=None, + negate_score=False, + score_name=None, + score_type="test", + log_scale="deprecated", + std_display_style="fill_between", + line_kw=None, + fill_between_kw=None, + errorbar_kw=None, + ): + check_matplotlib_support(f"{self.__class__.__name__}.plot") + + import matplotlib.pyplot as plt + + if ax is None: + _, ax = plt.subplots() + + if negate_score: + train_scores, test_scores = -self.train_scores, -self.test_scores + else: + train_scores, test_scores = self.train_scores, self.test_scores + + if std_display_style not in ("errorbar", "fill_between", None): + raise ValueError( + f"Unknown std_display_style: {std_display_style}. Should be one of" + " 'errorbar', 'fill_between', or None." + ) + + if score_type not in ("test", "train", "both"): + raise ValueError( + f"Unknown score_type: {score_type}. Should be one of 'test', " + "'train', or 'both'." + ) + + if score_type == "train": + scores = {"Train": train_scores} + elif score_type == "test": + scores = {"Test": test_scores} + else: # score_type == "both" + scores = {"Train": train_scores, "Test": test_scores} + + if std_display_style in ("fill_between", None): + # plot the mean score + if line_kw is None: + line_kw = {} + + self.lines_ = [] + for line_label, score in scores.items(): + self.lines_.append( + *ax.plot( + x_data, + score.mean(axis=1), + label=line_label, + **line_kw, + ) + ) + self.errorbar_ = None + self.fill_between_ = None # overwritten below by fill_between + + if std_display_style == "errorbar": + if errorbar_kw is None: + errorbar_kw = {} + + self.errorbar_ = [] + for line_label, score in scores.items(): + self.errorbar_.append( + ax.errorbar( + x_data, + score.mean(axis=1), + score.std(axis=1), + label=line_label, + **errorbar_kw, + ) + ) + self.lines_, self.fill_between_ = None, None + elif std_display_style == "fill_between": + if fill_between_kw is None: + fill_between_kw = {} + default_fill_between_kw = {"alpha": 0.5} + fill_between_kw = {**default_fill_between_kw, **fill_between_kw} + + self.fill_between_ = [] + for line_label, score in scores.items(): + self.fill_between_.append( + ax.fill_between( + x_data, + score.mean(axis=1) - score.std(axis=1), + score.mean(axis=1) + score.std(axis=1), + **fill_between_kw, + ) + ) + + score_name = self.score_name if score_name is None else score_name + + ax.legend() + + # TODO(1.5): to be removed + if log_scale != "deprecated": + warnings.warn( + ( + "The `log_scale` parameter is deprecated as of version 1.3 " + "and will be removed in 1.5. You can use display.ax_.set_xscale " + "and display.ax_.set_yscale instead." + ), + FutureWarning, + ) + xscale = "log" if log_scale else "linear" + else: + # We found that a ratio, smaller or bigger than 5, between the largest and + # smallest gap of the x values is a good indicator to choose between linear + # and log scale. + if _interval_max_min_ratio(x_data) > 5: + xscale = "symlog" if x_data.min() <= 0 else "log" + else: + xscale = "linear" + ax.set_xscale(xscale) + ax.set_ylabel(f"{score_name}") + + self.ax_ = ax + self.figure_ = ax.figure + + +class LearningCurveDisplay(_BaseCurveDisplay): + """Learning Curve visualization. + + It is recommended to use + :meth:`~sklearn.model_selection.LearningCurveDisplay.from_estimator` to + create a :class:`~sklearn.model_selection.LearningCurveDisplay` instance. 
+ All parameters are stored as attributes. + + Read more in the :ref:`User Guide ` for general information + about the visualization API and + :ref:`detailed documentation ` regarding the learning + curve visualization. + + .. versionadded:: 1.2 + + Parameters + ---------- + train_sizes : ndarray of shape (n_unique_ticks,) + Numbers of training examples that has been used to generate the + learning curve. + + train_scores : ndarray of shape (n_ticks, n_cv_folds) + Scores on training sets. + + test_scores : ndarray of shape (n_ticks, n_cv_folds) + Scores on test set. + + score_name : str, default=None + The name of the score used in `learning_curve`. It will override the name + inferred from the `scoring` parameter. If `score` is `None`, we use `"Score"` if + `negate_score` is `False` and `"Negative score"` otherwise. If `scoring` is a + string or a callable, we infer the name. We replace `_` by spaces and capitalize + the first letter. We remove `neg_` and replace it by `"Negative"` if + `negate_score` is `False` or just remove it otherwise. + + Attributes + ---------- + ax_ : matplotlib Axes + Axes with the learning curve. + + figure_ : matplotlib Figure + Figure containing the learning curve. + + errorbar_ : list of matplotlib Artist or None + When the `std_display_style` is `"errorbar"`, this is a list of + `matplotlib.container.ErrorbarContainer` objects. If another style is + used, `errorbar_` is `None`. + + lines_ : list of matplotlib Artist or None + When the `std_display_style` is `"fill_between"`, this is a list of + `matplotlib.lines.Line2D` objects corresponding to the mean train and + test scores. If another style is used, `line_` is `None`. + + fill_between_ : list of matplotlib Artist or None + When the `std_display_style` is `"fill_between"`, this is a list of + `matplotlib.collections.PolyCollection` objects. If another style is + used, `fill_between_` is `None`. + + See Also + -------- + sklearn.model_selection.learning_curve : Compute the learning curve. + + Examples + -------- + >>> import matplotlib.pyplot as plt + >>> from sklearn.datasets import load_iris + >>> from sklearn.model_selection import LearningCurveDisplay, learning_curve + >>> from sklearn.tree import DecisionTreeClassifier + >>> X, y = load_iris(return_X_y=True) + >>> tree = DecisionTreeClassifier(random_state=0) + >>> train_sizes, train_scores, test_scores = learning_curve( + ... tree, X, y) + >>> display = LearningCurveDisplay(train_sizes=train_sizes, + ... train_scores=train_scores, test_scores=test_scores, score_name="Score") + >>> display.plot() + <...> + >>> plt.show() + """ + + def __init__(self, *, train_sizes, train_scores, test_scores, score_name=None): + self.train_sizes = train_sizes + self.train_scores = train_scores + self.test_scores = test_scores + self.score_name = score_name + + def plot( + self, + ax=None, + *, + negate_score=False, + score_name=None, + score_type="both", + log_scale="deprecated", + std_display_style="fill_between", + line_kw=None, + fill_between_kw=None, + errorbar_kw=None, + ): + """Plot visualization. + + Parameters + ---------- + ax : matplotlib Axes, default=None + Axes object to plot on. If `None`, a new figure and axes is + created. + + negate_score : bool, default=False + Whether or not to negate the scores obtained through + :func:`~sklearn.model_selection.learning_curve`. This is + particularly useful when using the error denoted by `neg_*` in + `scikit-learn`. + + score_name : str, default=None + The name of the score used to decorate the y-axis of the plot. 
It will + override the name inferred from the `scoring` parameter. If `score` is + `None`, we use `"Score"` if `negate_score` is `False` and `"Negative score"` + otherwise. If `scoring` is a string or a callable, we infer the name. We + replace `_` by spaces and capitalize the first letter. We remove `neg_` and + replace it by `"Negative"` if `negate_score` is + `False` or just remove it otherwise. + + score_type : {"test", "train", "both"}, default="both" + The type of score to plot. Can be one of `"test"`, `"train"`, or + `"both"`. + + log_scale : bool, default="deprecated" + Whether or not to use a logarithmic scale for the x-axis. + + .. deprecated:: 1.3 + `log_scale` is deprecated in 1.3 and will be removed in 1.5. + Use `display.ax_.set_xscale` and `display.ax_.set_yscale` instead. + + std_display_style : {"errorbar", "fill_between"} or None, default="fill_between" + The style used to display the score standard deviation around the + mean score. If None, no standard deviation representation is + displayed. + + line_kw : dict, default=None + Additional keyword arguments passed to the `plt.plot` used to draw + the mean score. + + fill_between_kw : dict, default=None + Additional keyword arguments passed to the `plt.fill_between` used + to draw the score standard deviation. + + errorbar_kw : dict, default=None + Additional keyword arguments passed to the `plt.errorbar` used to + draw mean score and standard deviation score. + + Returns + ------- + display : :class:`~sklearn.model_selection.LearningCurveDisplay` + Object that stores computed values. + """ + self._plot_curve( + self.train_sizes, + ax=ax, + negate_score=negate_score, + score_name=score_name, + score_type=score_type, + log_scale=log_scale, + std_display_style=std_display_style, + line_kw=line_kw, + fill_between_kw=fill_between_kw, + errorbar_kw=errorbar_kw, + ) + self.ax_.set_xlabel("Number of samples in the training set") + return self + + @classmethod + def from_estimator( + cls, + estimator, + X, + y, + *, + groups=None, + train_sizes=np.linspace(0.1, 1.0, 5), + cv=None, + scoring=None, + exploit_incremental_learning=False, + n_jobs=None, + pre_dispatch="all", + verbose=0, + shuffle=False, + random_state=None, + error_score=np.nan, + fit_params=None, + ax=None, + negate_score=False, + score_name=None, + score_type="both", + log_scale="deprecated", + std_display_style="fill_between", + line_kw=None, + fill_between_kw=None, + errorbar_kw=None, + ): + """Create a learning curve display from an estimator. + + Read more in the :ref:`User Guide ` for general + information about the visualization API and :ref:`detailed + documentation ` regarding the learning curve + visualization. + + Parameters + ---------- + estimator : object type that implements the "fit" and "predict" methods + An object of that type which is cloned for each validation. + + X : array-like of shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples and + `n_features` is the number of features. + + y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None + Target relative to X for classification or regression; + None for unsupervised learning. + + groups : array-like of shape (n_samples,), default=None + Group labels for the samples used while splitting the dataset into + train/test set. Only used in conjunction with a "Group" :term:`cv` + instance (e.g., :class:`GroupKFold`). 
+ + train_sizes : array-like of shape (n_ticks,), \ + default=np.linspace(0.1, 1.0, 5) + Relative or absolute numbers of training examples that will be used + to generate the learning curve. If the dtype is float, it is + regarded as a fraction of the maximum size of the training set + (that is determined by the selected validation method), i.e. it has + to be within (0, 1]. Otherwise it is interpreted as absolute sizes + of the training sets. Note that for classification the number of + samples usually have to be big enough to contain at least one + sample from each class. + + cv : int, cross-validation generator or an iterable, default=None + Determines the cross-validation splitting strategy. + Possible inputs for cv are: + + - None, to use the default 5-fold cross validation, + - int, to specify the number of folds in a `(Stratified)KFold`, + - :term:`CV splitter`, + - An iterable yielding (train, test) splits as arrays of indices. + + For int/None inputs, if the estimator is a classifier and `y` is + either binary or multiclass, + :class:`~sklearn.model_selection.StratifiedKFold` is used. In all + other cases, :class:`~sklearn.model_selection.KFold` is used. These + splitters are instantiated with `shuffle=False` so the splits will + be the same across calls. + + Refer :ref:`User Guide ` for the various + cross-validation strategies that can be used here. + + scoring : str or callable, default=None + A string (see :ref:`scoring_parameter`) or + a scorer callable object / function with signature + `scorer(estimator, X, y)` (see :ref:`scoring`). + + exploit_incremental_learning : bool, default=False + If the estimator supports incremental learning, this will be + used to speed up fitting for different training set sizes. + + n_jobs : int, default=None + Number of jobs to run in parallel. Training the estimator and + computing the score are parallelized over the different training + and test sets. `None` means 1 unless in a + :obj:`joblib.parallel_backend` context. `-1` means using all + processors. See :term:`Glossary ` for more details. + + pre_dispatch : int or str, default='all' + Number of predispatched jobs for parallel execution (default is + all). The option can reduce the allocated memory. The str can + be an expression like '2*n_jobs'. + + verbose : int, default=0 + Controls the verbosity: the higher, the more messages. + + shuffle : bool, default=False + Whether to shuffle training data before taking prefixes of it + based on`train_sizes`. + + random_state : int, RandomState instance or None, default=None + Used when `shuffle` is True. Pass an int for reproducible + output across multiple function calls. + See :term:`Glossary `. + + error_score : 'raise' or numeric, default=np.nan + Value to assign to the score if an error occurs in estimator + fitting. If set to 'raise', the error is raised. If a numeric value + is given, FitFailedWarning is raised. + + fit_params : dict, default=None + Parameters to pass to the fit method of the estimator. + + ax : matplotlib Axes, default=None + Axes object to plot on. If `None`, a new figure and axes is + created. + + negate_score : bool, default=False + Whether or not to negate the scores obtained through + :func:`~sklearn.model_selection.learning_curve`. This is + particularly useful when using the error denoted by `neg_*` in + `scikit-learn`. + + score_name : str, default=None + The name of the score used to decorate the y-axis of the plot. It will + override the name inferred from the `scoring` parameter. 
If `score` is + `None`, we use `"Score"` if `negate_score` is `False` and `"Negative score"` + otherwise. If `scoring` is a string or a callable, we infer the name. We + replace `_` by spaces and capitalize the first letter. We remove `neg_` and + replace it by `"Negative"` if `negate_score` is + `False` or just remove it otherwise. + + score_type : {"test", "train", "both"}, default="both" + The type of score to plot. Can be one of `"test"`, `"train"`, or + `"both"`. + + log_scale : bool, default="deprecated" + Whether or not to use a logarithmic scale for the x-axis. + + .. deprecated:: 1.3 + `log_scale` is deprecated in 1.3 and will be removed in 1.5. + Use `display.ax_.xscale` and `display.ax_.yscale` instead. + + std_display_style : {"errorbar", "fill_between"} or None, default="fill_between" + The style used to display the score standard deviation around the + mean score. If `None`, no representation of the standard deviation + is displayed. + + line_kw : dict, default=None + Additional keyword arguments passed to the `plt.plot` used to draw + the mean score. + + fill_between_kw : dict, default=None + Additional keyword arguments passed to the `plt.fill_between` used + to draw the score standard deviation. + + errorbar_kw : dict, default=None + Additional keyword arguments passed to the `plt.errorbar` used to + draw mean score and standard deviation score. + + Returns + ------- + display : :class:`~sklearn.model_selection.LearningCurveDisplay` + Object that stores computed values. + + Examples + -------- + >>> import matplotlib.pyplot as plt + >>> from sklearn.datasets import load_iris + >>> from sklearn.model_selection import LearningCurveDisplay + >>> from sklearn.tree import DecisionTreeClassifier + >>> X, y = load_iris(return_X_y=True) + >>> tree = DecisionTreeClassifier(random_state=0) + >>> LearningCurveDisplay.from_estimator(tree, X, y) + <...> + >>> plt.show() + """ + check_matplotlib_support(f"{cls.__name__}.from_estimator") + + score_name = _validate_score_name(score_name, scoring, negate_score) + + train_sizes, train_scores, test_scores = learning_curve( + estimator, + X, + y, + groups=groups, + train_sizes=train_sizes, + cv=cv, + scoring=scoring, + exploit_incremental_learning=exploit_incremental_learning, + n_jobs=n_jobs, + pre_dispatch=pre_dispatch, + verbose=verbose, + shuffle=shuffle, + random_state=random_state, + error_score=error_score, + return_times=False, + fit_params=fit_params, + ) + + viz = cls( + train_sizes=train_sizes, + train_scores=train_scores, + test_scores=test_scores, + score_name=score_name, + ) + return viz.plot( + ax=ax, + negate_score=negate_score, + score_type=score_type, + log_scale=log_scale, + std_display_style=std_display_style, + line_kw=line_kw, + fill_between_kw=fill_between_kw, + errorbar_kw=errorbar_kw, + ) + + +class ValidationCurveDisplay(_BaseCurveDisplay): + """Validation Curve visualization. + + It is recommended to use + :meth:`~sklearn.model_selection.ValidationCurveDisplay.from_estimator` to + create a :class:`~sklearn.model_selection.ValidationCurveDisplay` instance. + All parameters are stored as attributes. + + Read more in the :ref:`User Guide ` for general information + about the visualization API and :ref:`detailed documentation + ` regarding the validation curve visualization. + + .. versionadded:: 1.3 + + Parameters + ---------- + param_name : str + Name of the parameter that has been varied. + + param_range : array-like of shape (n_ticks,) + The values of the parameter that have been evaluated. 
+ + train_scores : ndarray of shape (n_ticks, n_cv_folds) + Scores on training sets. + + test_scores : ndarray of shape (n_ticks, n_cv_folds) + Scores on test set. + + score_name : str, default=None + The name of the score used in `validation_curve`. It will override the name + inferred from the `scoring` parameter. If `score` is `None`, we use `"Score"` if + `negate_score` is `False` and `"Negative score"` otherwise. If `scoring` is a + string or a callable, we infer the name. We replace `_` by spaces and capitalize + the first letter. We remove `neg_` and replace it by `"Negative"` if + `negate_score` is `False` or just remove it otherwise. + + Attributes + ---------- + ax_ : matplotlib Axes + Axes with the validation curve. + + figure_ : matplotlib Figure + Figure containing the validation curve. + + errorbar_ : list of matplotlib Artist or None + When the `std_display_style` is `"errorbar"`, this is a list of + `matplotlib.container.ErrorbarContainer` objects. If another style is + used, `errorbar_` is `None`. + + lines_ : list of matplotlib Artist or None + When the `std_display_style` is `"fill_between"`, this is a list of + `matplotlib.lines.Line2D` objects corresponding to the mean train and + test scores. If another style is used, `line_` is `None`. + + fill_between_ : list of matplotlib Artist or None + When the `std_display_style` is `"fill_between"`, this is a list of + `matplotlib.collections.PolyCollection` objects. If another style is + used, `fill_between_` is `None`. + + See Also + -------- + sklearn.model_selection.validation_curve : Compute the validation curve. + + Examples + -------- + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> from sklearn.datasets import make_classification + >>> from sklearn.model_selection import ValidationCurveDisplay, validation_curve + >>> from sklearn.linear_model import LogisticRegression + >>> X, y = make_classification(n_samples=1_000, random_state=0) + >>> logistic_regression = LogisticRegression() + >>> param_name, param_range = "C", np.logspace(-8, 3, 10) + >>> train_scores, test_scores = validation_curve( + ... logistic_regression, X, y, param_name=param_name, param_range=param_range + ... ) + >>> display = ValidationCurveDisplay( + ... param_name=param_name, param_range=param_range, + ... train_scores=train_scores, test_scores=test_scores, score_name="Score" + ... ) + >>> display.plot() + <...> + >>> plt.show() + """ + + def __init__( + self, *, param_name, param_range, train_scores, test_scores, score_name=None + ): + self.param_name = param_name + self.param_range = param_range + self.train_scores = train_scores + self.test_scores = test_scores + self.score_name = score_name + + def plot( + self, + ax=None, + *, + negate_score=False, + score_name=None, + score_type="both", + std_display_style="fill_between", + line_kw=None, + fill_between_kw=None, + errorbar_kw=None, + ): + """Plot visualization. + + Parameters + ---------- + ax : matplotlib Axes, default=None + Axes object to plot on. If `None`, a new figure and axes is + created. + + negate_score : bool, default=False + Whether or not to negate the scores obtained through + :func:`~sklearn.model_selection.validation_curve`. This is + particularly useful when using the error denoted by `neg_*` in + `scikit-learn`. + + score_name : str, default=None + The name of the score used to decorate the y-axis of the plot. It will + override the name inferred from the `scoring` parameter. 
If `score` is + `None`, we use `"Score"` if `negate_score` is `False` and `"Negative score"` + otherwise. If `scoring` is a string or a callable, we infer the name. We + replace `_` by spaces and capitalize the first letter. We remove `neg_` and + replace it by `"Negative"` if `negate_score` is + `False` or just remove it otherwise. + + score_type : {"test", "train", "both"}, default="both" + The type of score to plot. Can be one of `"test"`, `"train"`, or + `"both"`. + + std_display_style : {"errorbar", "fill_between"} or None, default="fill_between" + The style used to display the score standard deviation around the + mean score. If None, no standard deviation representation is + displayed. + + line_kw : dict, default=None + Additional keyword arguments passed to the `plt.plot` used to draw + the mean score. + + fill_between_kw : dict, default=None + Additional keyword arguments passed to the `plt.fill_between` used + to draw the score standard deviation. + + errorbar_kw : dict, default=None + Additional keyword arguments passed to the `plt.errorbar` used to + draw mean score and standard deviation score. + + Returns + ------- + display : :class:`~sklearn.model_selection.ValidationCurveDisplay` + Object that stores computed values. + """ + self._plot_curve( + self.param_range, + ax=ax, + negate_score=negate_score, + score_name=score_name, + score_type=score_type, + log_scale="deprecated", + std_display_style=std_display_style, + line_kw=line_kw, + fill_between_kw=fill_between_kw, + errorbar_kw=errorbar_kw, + ) + self.ax_.set_xlabel(f"{self.param_name}") + return self + + @classmethod + def from_estimator( + cls, + estimator, + X, + y, + *, + param_name, + param_range, + groups=None, + cv=None, + scoring=None, + n_jobs=None, + pre_dispatch="all", + verbose=0, + error_score=np.nan, + fit_params=None, + ax=None, + negate_score=False, + score_name=None, + score_type="both", + std_display_style="fill_between", + line_kw=None, + fill_between_kw=None, + errorbar_kw=None, + ): + """Create a validation curve display from an estimator. + + Read more in the :ref:`User Guide ` for general + information about the visualization API and :ref:`detailed + documentation ` regarding the validation curve + visualization. + + Parameters + ---------- + estimator : object type that implements the "fit" and "predict" methods + An object of that type which is cloned for each validation. + + X : array-like of shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples and + `n_features` is the number of features. + + y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None + Target relative to X for classification or regression; + None for unsupervised learning. + + param_name : str + Name of the parameter that will be varied. + + param_range : array-like of shape (n_values,) + The values of the parameter that will be evaluated. + + groups : array-like of shape (n_samples,), default=None + Group labels for the samples used while splitting the dataset into + train/test set. Only used in conjunction with a "Group" :term:`cv` + instance (e.g., :class:`GroupKFold`). + + cv : int, cross-validation generator or an iterable, default=None + Determines the cross-validation splitting strategy. + Possible inputs for cv are: + + - None, to use the default 5-fold cross validation, + - int, to specify the number of folds in a `(Stratified)KFold`, + - :term:`CV splitter`, + - An iterable yielding (train, test) splits as arrays of indices. 
+ + For int/None inputs, if the estimator is a classifier and `y` is + either binary or multiclass, + :class:`~sklearn.model_selection.StratifiedKFold` is used. In all + other cases, :class:`~sklearn.model_selection.KFold` is used. These + splitters are instantiated with `shuffle=False` so the splits will + be the same across calls. + + Refer :ref:`User Guide ` for the various + cross-validation strategies that can be used here. + + scoring : str or callable, default=None + A string (see :ref:`scoring_parameter`) or + a scorer callable object / function with signature + `scorer(estimator, X, y)` (see :ref:`scoring`). + + n_jobs : int, default=None + Number of jobs to run in parallel. Training the estimator and + computing the score are parallelized over the different training + and test sets. `None` means 1 unless in a + :obj:`joblib.parallel_backend` context. `-1` means using all + processors. See :term:`Glossary ` for more details. + + pre_dispatch : int or str, default='all' + Number of predispatched jobs for parallel execution (default is + all). The option can reduce the allocated memory. The str can + be an expression like '2*n_jobs'. + + verbose : int, default=0 + Controls the verbosity: the higher, the more messages. + + error_score : 'raise' or numeric, default=np.nan + Value to assign to the score if an error occurs in estimator + fitting. If set to 'raise', the error is raised. If a numeric value + is given, FitFailedWarning is raised. + + fit_params : dict, default=None + Parameters to pass to the fit method of the estimator. + + ax : matplotlib Axes, default=None + Axes object to plot on. If `None`, a new figure and axes is + created. + + negate_score : bool, default=False + Whether or not to negate the scores obtained through + :func:`~sklearn.model_selection.validation_curve`. This is + particularly useful when using the error denoted by `neg_*` in + `scikit-learn`. + + score_name : str, default=None + The name of the score used to decorate the y-axis of the plot. It will + override the name inferred from the `scoring` parameter. If `score` is + `None`, we use `"Score"` if `negate_score` is `False` and `"Negative score"` + otherwise. If `scoring` is a string or a callable, we infer the name. We + replace `_` by spaces and capitalize the first letter. We remove `neg_` and + replace it by `"Negative"` if `negate_score` is + `False` or just remove it otherwise. + + score_type : {"test", "train", "both"}, default="both" + The type of score to plot. Can be one of `"test"`, `"train"`, or + `"both"`. + + std_display_style : {"errorbar", "fill_between"} or None, default="fill_between" + The style used to display the score standard deviation around the + mean score. If `None`, no representation of the standard deviation + is displayed. + + line_kw : dict, default=None + Additional keyword arguments passed to the `plt.plot` used to draw + the mean score. + + fill_between_kw : dict, default=None + Additional keyword arguments passed to the `plt.fill_between` used + to draw the score standard deviation. + + errorbar_kw : dict, default=None + Additional keyword arguments passed to the `plt.errorbar` used to + draw mean score and standard deviation score. + + Returns + ------- + display : :class:`~sklearn.model_selection.ValidationCurveDisplay` + Object that stores computed values. 
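Editor's sketch (not part of the patched docstring): how the parameters documented above combine in a typical call. It assumes scikit-learn >= 1.3 with matplotlib installed; the dataset and estimator are arbitrary placeholders.

# Plot a validation curve for an error metric: the neg_* score is negated so the
# y-axis reads as a positive loss, and error bars replace the filled band.
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import ValidationCurveDisplay

X, y = make_classification(n_samples=500, random_state=0)
ValidationCurveDisplay.from_estimator(
    LogisticRegression(max_iter=1_000),
    X,
    y,
    param_name="C",
    param_range=np.logspace(-3, 3, 7),
    scoring="neg_log_loss",        # a neg_* error metric ...
    negate_score=True,             # ... displayed as a positive loss
    std_display_style="errorbar",  # error bars instead of a fill_between band
)
plt.show()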
+ + Examples + -------- + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> from sklearn.datasets import make_classification + >>> from sklearn.model_selection import ValidationCurveDisplay + >>> from sklearn.linear_model import LogisticRegression + >>> X, y = make_classification(n_samples=1_000, random_state=0) + >>> logistic_regression = LogisticRegression() + >>> param_name, param_range = "C", np.logspace(-8, 3, 10) + >>> ValidationCurveDisplay.from_estimator( + ... logistic_regression, X, y, param_name=param_name, + ... param_range=param_range, + ... ) + <...> + >>> plt.show() + """ + check_matplotlib_support(f"{cls.__name__}.from_estimator") + + score_name = _validate_score_name(score_name, scoring, negate_score) + + train_scores, test_scores = validation_curve( + estimator, + X, + y, + param_name=param_name, + param_range=param_range, + groups=groups, + cv=cv, + scoring=scoring, + n_jobs=n_jobs, + pre_dispatch=pre_dispatch, + verbose=verbose, + error_score=error_score, + fit_params=fit_params, + ) + + viz = cls( + param_name=param_name, + param_range=np.asarray(param_range), + train_scores=train_scores, + test_scores=test_scores, + score_name=score_name, + ) + return viz.plot( + ax=ax, + negate_score=negate_score, + score_type=score_type, + std_display_style=std_display_style, + line_kw=line_kw, + fill_between_kw=fill_between_kw, + errorbar_kw=errorbar_kw, + ) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/_search.py b/env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/_search.py new file mode 100644 index 0000000000000000000000000000000000000000..9de03c2c663ec0b8165f2c42f9183d2da7164815 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/_search.py @@ -0,0 +1,1918 @@ +""" +The :mod:`sklearn.model_selection._search` includes utilities to fine-tune the +parameters of an estimator. 
+""" + +# Author: Alexandre Gramfort , +# Gael Varoquaux +# Andreas Mueller +# Olivier Grisel +# Raghav RV +# License: BSD 3 clause + +import numbers +import operator +import time +import warnings +from abc import ABCMeta, abstractmethod +from collections import defaultdict +from collections.abc import Iterable, Mapping, Sequence +from functools import partial, reduce +from itertools import product + +import numpy as np +from numpy.ma import MaskedArray +from scipy.stats import rankdata + +from ..base import BaseEstimator, MetaEstimatorMixin, _fit_context, clone, is_classifier +from ..exceptions import NotFittedError +from ..metrics import check_scoring +from ..metrics._scorer import ( + _check_multimetric_scoring, + _MultimetricScorer, + get_scorer_names, +) +from ..utils import Bunch, check_random_state +from ..utils._param_validation import HasMethods, Interval, StrOptions +from ..utils._tags import _safe_tags +from ..utils.metadata_routing import ( + MetadataRouter, + MethodMapping, + _raise_for_params, + _routing_enabled, + process_routing, +) +from ..utils.metaestimators import available_if +from ..utils.parallel import Parallel, delayed +from ..utils.random import sample_without_replacement +from ..utils.validation import _check_method_params, check_is_fitted, indexable +from ._split import check_cv +from ._validation import ( + _aggregate_score_dicts, + _fit_and_score, + _insert_error_scores, + _normalize_score_results, + _warn_or_raise_about_fit_failures, +) + +__all__ = ["GridSearchCV", "ParameterGrid", "ParameterSampler", "RandomizedSearchCV"] + + +class ParameterGrid: + """Grid of parameters with a discrete number of values for each. + + Can be used to iterate over parameter value combinations with the + Python built-in function iter. + The order of the generated parameter combinations is deterministic. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + param_grid : dict of str to sequence, or sequence of such + The parameter grid to explore, as a dictionary mapping estimator + parameters to sequences of allowed values. + + An empty dict signifies default parameters. + + A sequence of dicts signifies a sequence of grids to search, and is + useful to avoid exploring parameter combinations that make no sense + or have no effect. See the examples below. + + Examples + -------- + >>> from sklearn.model_selection import ParameterGrid + >>> param_grid = {'a': [1, 2], 'b': [True, False]} + >>> list(ParameterGrid(param_grid)) == ( + ... [{'a': 1, 'b': True}, {'a': 1, 'b': False}, + ... {'a': 2, 'b': True}, {'a': 2, 'b': False}]) + True + + >>> grid = [{'kernel': ['linear']}, {'kernel': ['rbf'], 'gamma': [1, 10]}] + >>> list(ParameterGrid(grid)) == [{'kernel': 'linear'}, + ... {'kernel': 'rbf', 'gamma': 1}, + ... {'kernel': 'rbf', 'gamma': 10}] + True + >>> ParameterGrid(grid)[1] == {'kernel': 'rbf', 'gamma': 1} + True + + See Also + -------- + GridSearchCV : Uses :class:`ParameterGrid` to perform a full parallelized + parameter search. 
+ """ + + def __init__(self, param_grid): + if not isinstance(param_grid, (Mapping, Iterable)): + raise TypeError( + f"Parameter grid should be a dict or a list, got: {param_grid!r} of" + f" type {type(param_grid).__name__}" + ) + + if isinstance(param_grid, Mapping): + # wrap dictionary in a singleton list to support either dict + # or list of dicts + param_grid = [param_grid] + + # check if all entries are dictionaries of lists + for grid in param_grid: + if not isinstance(grid, dict): + raise TypeError(f"Parameter grid is not a dict ({grid!r})") + for key, value in grid.items(): + if isinstance(value, np.ndarray) and value.ndim > 1: + raise ValueError( + f"Parameter array for {key!r} should be one-dimensional, got:" + f" {value!r} with shape {value.shape}" + ) + if isinstance(value, str) or not isinstance( + value, (np.ndarray, Sequence) + ): + raise TypeError( + f"Parameter grid for parameter {key!r} needs to be a list or a" + f" numpy array, but got {value!r} (of type " + f"{type(value).__name__}) instead. Single values " + "need to be wrapped in a list with one element." + ) + if len(value) == 0: + raise ValueError( + f"Parameter grid for parameter {key!r} need " + f"to be a non-empty sequence, got: {value!r}" + ) + + self.param_grid = param_grid + + def __iter__(self): + """Iterate over the points in the grid. + + Returns + ------- + params : iterator over dict of str to any + Yields dictionaries mapping each estimator parameter to one of its + allowed values. + """ + for p in self.param_grid: + # Always sort the keys of a dictionary, for reproducibility + items = sorted(p.items()) + if not items: + yield {} + else: + keys, values = zip(*items) + for v in product(*values): + params = dict(zip(keys, v)) + yield params + + def __len__(self): + """Number of points on the grid.""" + # Product function that can handle iterables (np.prod can't). + product = partial(reduce, operator.mul) + return sum( + product(len(v) for v in p.values()) if p else 1 for p in self.param_grid + ) + + def __getitem__(self, ind): + """Get the parameters that would be ``ind``th in iteration + + Parameters + ---------- + ind : int + The iteration index + + Returns + ------- + params : dict of str to any + Equal to list(self)[ind] + """ + # This is used to make discrete sampling without replacement memory + # efficient. + for sub_grid in self.param_grid: + # XXX: could memoize information used here + if not sub_grid: + if ind == 0: + return {} + else: + ind -= 1 + continue + + # Reverse so most frequent cycling parameter comes first + keys, values_lists = zip(*sorted(sub_grid.items())[::-1]) + sizes = [len(v_list) for v_list in values_lists] + total = np.prod(sizes) + + if ind >= total: + # Try the next grid + ind -= total + else: + out = {} + for key, v_list, n in zip(keys, values_lists, sizes): + ind, offset = divmod(ind, n) + out[key] = v_list[offset] + return out + + raise IndexError("ParameterGrid index out of range") + + +class ParameterSampler: + """Generator on parameters sampled from given distributions. + + Non-deterministic iterable over random candidate combinations for hyper- + parameter search. If all parameters are presented as a list, + sampling without replacement is performed. If at least one parameter + is given as a distribution, sampling with replacement is used. + It is highly recommended to use continuous distributions for continuous + parameters. + + Read more in the :ref:`User Guide `. 
+ + Parameters + ---------- + param_distributions : dict + Dictionary with parameters names (`str`) as keys and distributions + or lists of parameters to try. Distributions must provide a ``rvs`` + method for sampling (such as those from scipy.stats.distributions). + If a list is given, it is sampled uniformly. + If a list of dicts is given, first a dict is sampled uniformly, and + then a parameter is sampled using that dict as above. + + n_iter : int + Number of parameter settings that are produced. + + random_state : int, RandomState instance or None, default=None + Pseudo random number generator state used for random uniform sampling + from lists of possible values instead of scipy.stats distributions. + Pass an int for reproducible output across multiple + function calls. + See :term:`Glossary `. + + Returns + ------- + params : dict of str to any + **Yields** dictionaries mapping each estimator parameter to + as sampled value. + + Examples + -------- + >>> from sklearn.model_selection import ParameterSampler + >>> from scipy.stats.distributions import expon + >>> import numpy as np + >>> rng = np.random.RandomState(0) + >>> param_grid = {'a':[1, 2], 'b': expon()} + >>> param_list = list(ParameterSampler(param_grid, n_iter=4, + ... random_state=rng)) + >>> rounded_list = [dict((k, round(v, 6)) for (k, v) in d.items()) + ... for d in param_list] + >>> rounded_list == [{'b': 0.89856, 'a': 1}, + ... {'b': 0.923223, 'a': 1}, + ... {'b': 1.878964, 'a': 2}, + ... {'b': 1.038159, 'a': 2}] + True + """ + + def __init__(self, param_distributions, n_iter, *, random_state=None): + if not isinstance(param_distributions, (Mapping, Iterable)): + raise TypeError( + "Parameter distribution is not a dict or a list," + f" got: {param_distributions!r} of type " + f"{type(param_distributions).__name__}" + ) + + if isinstance(param_distributions, Mapping): + # wrap dictionary in a singleton list to support either dict + # or list of dicts + param_distributions = [param_distributions] + + for dist in param_distributions: + if not isinstance(dist, dict): + raise TypeError( + "Parameter distribution is not a dict ({!r})".format(dist) + ) + for key in dist: + if not isinstance(dist[key], Iterable) and not hasattr( + dist[key], "rvs" + ): + raise TypeError( + f"Parameter grid for parameter {key!r} is not iterable " + f"or a distribution (value={dist[key]})" + ) + self.n_iter = n_iter + self.random_state = random_state + self.param_distributions = param_distributions + + def _is_all_lists(self): + return all( + all(not hasattr(v, "rvs") for v in dist.values()) + for dist in self.param_distributions + ) + + def __iter__(self): + rng = check_random_state(self.random_state) + + # if all distributions are given as lists, we want to sample without + # replacement + if self._is_all_lists(): + # look up sampled parameter settings in parameter grid + param_grid = ParameterGrid(self.param_distributions) + grid_size = len(param_grid) + n_iter = self.n_iter + + if grid_size < n_iter: + warnings.warn( + "The total space of parameters %d is smaller " + "than n_iter=%d. Running %d iterations. For exhaustive " + "searches, use GridSearchCV." 
% (grid_size, self.n_iter, grid_size), + UserWarning, + ) + n_iter = grid_size + for i in sample_without_replacement(grid_size, n_iter, random_state=rng): + yield param_grid[i] + + else: + for _ in range(self.n_iter): + dist = rng.choice(self.param_distributions) + # Always sort the keys of a dictionary, for reproducibility + items = sorted(dist.items()) + params = dict() + for k, v in items: + if hasattr(v, "rvs"): + params[k] = v.rvs(random_state=rng) + else: + params[k] = v[rng.randint(len(v))] + yield params + + def __len__(self): + """Number of points that will be sampled.""" + if self._is_all_lists(): + grid_size = len(ParameterGrid(self.param_distributions)) + return min(self.n_iter, grid_size) + else: + return self.n_iter + + +def _check_refit(search_cv, attr): + if not search_cv.refit: + raise AttributeError( + f"This {type(search_cv).__name__} instance was initialized with " + f"`refit=False`. {attr} is available only after refitting on the best " + "parameters. You can refit an estimator manually using the " + "`best_params_` attribute" + ) + + +def _estimator_has(attr): + """Check if we can delegate a method to the underlying estimator. + + Calling a prediction method will only be available if `refit=True`. In + such case, we check first the fitted best estimator. If it is not + fitted, we check the unfitted estimator. + + Checking the unfitted estimator allows to use `hasattr` on the `SearchCV` + instance even before calling `fit`. + """ + + def check(self): + _check_refit(self, attr) + if hasattr(self, "best_estimator_"): + # raise an AttributeError if `attr` does not exist + getattr(self.best_estimator_, attr) + return True + # raise an AttributeError if `attr` does not exist + getattr(self.estimator, attr) + return True + + return check + + +class BaseSearchCV(MetaEstimatorMixin, BaseEstimator, metaclass=ABCMeta): + """Abstract base class for hyper parameter search with cross-validation.""" + + _parameter_constraints: dict = { + "estimator": [HasMethods(["fit"])], + "scoring": [ + StrOptions(set(get_scorer_names())), + callable, + list, + tuple, + dict, + None, + ], + "n_jobs": [numbers.Integral, None], + "refit": ["boolean", str, callable], + "cv": ["cv_object"], + "verbose": ["verbose"], + "pre_dispatch": [numbers.Integral, str], + "error_score": [StrOptions({"raise"}), numbers.Real], + "return_train_score": ["boolean"], + } + + @abstractmethod + def __init__( + self, + estimator, + *, + scoring=None, + n_jobs=None, + refit=True, + cv=None, + verbose=0, + pre_dispatch="2*n_jobs", + error_score=np.nan, + return_train_score=True, + ): + self.scoring = scoring + self.estimator = estimator + self.n_jobs = n_jobs + self.refit = refit + self.cv = cv + self.verbose = verbose + self.pre_dispatch = pre_dispatch + self.error_score = error_score + self.return_train_score = return_train_score + + @property + def _estimator_type(self): + return self.estimator._estimator_type + + def _more_tags(self): + # allows cross-validation to see 'precomputed' metrics + return { + "pairwise": _safe_tags(self.estimator, "pairwise"), + "_xfail_checks": { + "check_supervised_y_2d": "DataConversionWarning not caught" + }, + } + + def score(self, X, y=None, **params): + """Return the score on the given data, if the estimator has been refit. + + This uses the score defined by ``scoring`` where provided, and the + ``best_estimator_.score`` method otherwise. 
+ + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Input data, where `n_samples` is the number of samples and + `n_features` is the number of features. + + y : array-like of shape (n_samples, n_output) \ + or (n_samples,), default=None + Target relative to X for classification or regression; + None for unsupervised learning. + + **params : dict + Parameters to be passed to the underlying scorer(s). + + ..versionadded:: 1.4 + Only available if `enable_metadata_routing=True`. See + :ref:`Metadata Routing User Guide ` for more + details. + + Returns + ------- + score : float + The score defined by ``scoring`` if provided, and the + ``best_estimator_.score`` method otherwise. + """ + _check_refit(self, "score") + check_is_fitted(self) + + _raise_for_params(params, self, "score") + + if _routing_enabled(): + score_params = process_routing(self, "score", **params).scorer["score"] + else: + score_params = dict() + + if self.scorer_ is None: + raise ValueError( + "No score function explicitly defined, " + "and the estimator doesn't provide one %s" + % self.best_estimator_ + ) + if isinstance(self.scorer_, dict): + if self.multimetric_: + scorer = self.scorer_[self.refit] + else: + scorer = self.scorer_ + return scorer(self.best_estimator_, X, y, **score_params) + + # callable + score = self.scorer_(self.best_estimator_, X, y, **score_params) + if self.multimetric_: + score = score[self.refit] + return score + + @available_if(_estimator_has("score_samples")) + def score_samples(self, X): + """Call score_samples on the estimator with the best found parameters. + + Only available if ``refit=True`` and the underlying estimator supports + ``score_samples``. + + .. versionadded:: 0.24 + + Parameters + ---------- + X : iterable + Data to predict on. Must fulfill input requirements + of the underlying estimator. + + Returns + ------- + y_score : ndarray of shape (n_samples,) + The ``best_estimator_.score_samples`` method. + """ + check_is_fitted(self) + return self.best_estimator_.score_samples(X) + + @available_if(_estimator_has("predict")) + def predict(self, X): + """Call predict on the estimator with the best found parameters. + + Only available if ``refit=True`` and the underlying estimator supports + ``predict``. + + Parameters + ---------- + X : indexable, length n_samples + Must fulfill the input assumptions of the + underlying estimator. + + Returns + ------- + y_pred : ndarray of shape (n_samples,) + The predicted labels or values for `X` based on the estimator with + the best found parameters. + """ + check_is_fitted(self) + return self.best_estimator_.predict(X) + + @available_if(_estimator_has("predict_proba")) + def predict_proba(self, X): + """Call predict_proba on the estimator with the best found parameters. + + Only available if ``refit=True`` and the underlying estimator supports + ``predict_proba``. + + Parameters + ---------- + X : indexable, length n_samples + Must fulfill the input assumptions of the + underlying estimator. + + Returns + ------- + y_pred : ndarray of shape (n_samples,) or (n_samples, n_classes) + Predicted class probabilities for `X` based on the estimator with + the best found parameters. The order of the classes corresponds + to that in the fitted attribute :term:`classes_`. + """ + check_is_fitted(self) + return self.best_estimator_.predict_proba(X) + + @available_if(_estimator_has("predict_log_proba")) + def predict_log_proba(self, X): + """Call predict_log_proba on the estimator with the best found parameters. 
+ + Only available if ``refit=True`` and the underlying estimator supports + ``predict_log_proba``. + + Parameters + ---------- + X : indexable, length n_samples + Must fulfill the input assumptions of the + underlying estimator. + + Returns + ------- + y_pred : ndarray of shape (n_samples,) or (n_samples, n_classes) + Predicted class log-probabilities for `X` based on the estimator + with the best found parameters. The order of the classes + corresponds to that in the fitted attribute :term:`classes_`. + """ + check_is_fitted(self) + return self.best_estimator_.predict_log_proba(X) + + @available_if(_estimator_has("decision_function")) + def decision_function(self, X): + """Call decision_function on the estimator with the best found parameters. + + Only available if ``refit=True`` and the underlying estimator supports + ``decision_function``. + + Parameters + ---------- + X : indexable, length n_samples + Must fulfill the input assumptions of the + underlying estimator. + + Returns + ------- + y_score : ndarray of shape (n_samples,) or (n_samples, n_classes) \ + or (n_samples, n_classes * (n_classes-1) / 2) + Result of the decision function for `X` based on the estimator with + the best found parameters. + """ + check_is_fitted(self) + return self.best_estimator_.decision_function(X) + + @available_if(_estimator_has("transform")) + def transform(self, X): + """Call transform on the estimator with the best found parameters. + + Only available if the underlying estimator supports ``transform`` and + ``refit=True``. + + Parameters + ---------- + X : indexable, length n_samples + Must fulfill the input assumptions of the + underlying estimator. + + Returns + ------- + Xt : {ndarray, sparse matrix} of shape (n_samples, n_features) + `X` transformed in the new space based on the estimator with + the best found parameters. + """ + check_is_fitted(self) + return self.best_estimator_.transform(X) + + @available_if(_estimator_has("inverse_transform")) + def inverse_transform(self, Xt): + """Call inverse_transform on the estimator with the best found params. + + Only available if the underlying estimator implements + ``inverse_transform`` and ``refit=True``. + + Parameters + ---------- + Xt : indexable, length n_samples + Must fulfill the input assumptions of the + underlying estimator. + + Returns + ------- + X : {ndarray, sparse matrix} of shape (n_samples, n_features) + Result of the `inverse_transform` function for `Xt` based on the + estimator with the best found parameters. + """ + check_is_fitted(self) + return self.best_estimator_.inverse_transform(Xt) + + @property + def n_features_in_(self): + """Number of features seen during :term:`fit`. + + Only available when `refit=True`. + """ + # For consistency with other estimators we raise a AttributeError so + # that hasattr() fails if the search estimator isn't fitted. + try: + check_is_fitted(self) + except NotFittedError as nfe: + raise AttributeError( + "{} object has no n_features_in_ attribute.".format( + self.__class__.__name__ + ) + ) from nfe + + return self.best_estimator_.n_features_in_ + + @property + def classes_(self): + """Class labels. + + Only available when `refit=True` and the estimator is a classifier. + """ + _estimator_has("classes_")(self) + return self.best_estimator_.classes_ + + def _run_search(self, evaluate_candidates): + """Repeatedly calls `evaluate_candidates` to conduct a search. 
+
+        This method, implemented in sub-classes, makes it possible to
+        customize the scheduling of evaluations: GridSearchCV and
+        RandomizedSearchCV schedule evaluations for their whole parameter
+        search space at once but other more sequential approaches are also
+        possible: for instance it is possible to iteratively schedule
+        evaluations for new regions of the parameter search space based on
+        previously collected evaluation results. This makes it possible to
+        implement Bayesian optimization or more generally sequential
+        model-based optimization by deriving from the BaseSearchCV abstract
+        base class. For example, Successive Halving is implemented by calling
+        `evaluate_candidates` multiple times (once per iteration of the SH
+        process), each time passing a different set of candidates with `X`
+        and `y` of increasing sizes.
+
+        Parameters
+        ----------
+        evaluate_candidates : callable
+            This callback accepts:
+                - a list of candidates, where each candidate is a dict of
+                  parameter settings.
+                - an optional `cv` parameter which can be used to e.g.
+                  evaluate candidates on different dataset splits, or
+                  evaluate candidates on subsampled data (as done in the
+                  SuccessiveHalving estimators). By default, the original
+                  `cv` parameter is used, and it is available as a private
+                  `_checked_cv_orig` attribute.
+                - an optional `more_results` dict. Each key will be added to
+                  the `cv_results_` attribute. Values should be lists of
+                  length `n_candidates`.
+
+            It returns a dict of all results so far, formatted like
+            ``cv_results_``.
+
+            Important note (relevant whether the default cv is used or not):
+            in randomized splitters, and unless the random_state parameter of
+            cv was set to an int, calling cv.split() multiple times will
+            yield different splits. Since cv.split() is called in
+            evaluate_candidates, this means that candidates will be evaluated
+            on different splits each time evaluate_candidates is called. This
+            might be a methodological issue depending on the search strategy
+            that you're implementing. To prevent randomized splitters from
+            being used, you may use _split._yields_constant_splits().
+
+        Examples
+        --------
+
+        ::
+
+            def _run_search(self, evaluate_candidates):
+                'Try C=0.1 only if C=1 is better than C=10'
+                all_results = evaluate_candidates([{'C': 1}, {'C': 10}])
+                score = all_results['mean_test_score']
+                if score[0] < score[1]:
+                    evaluate_candidates([{'C': 0.1}])
+        """
+        raise NotImplementedError("_run_search not implemented.")
+
+    def _check_refit_for_multimetric(self, scores):
+        """Check that `refit` is compatible with `scores` and is valid."""
+        multimetric_refit_msg = (
+            "For multi-metric scoring, the parameter refit must be set to a "
+            "scorer key or a callable to refit an estimator with the best "
+            "parameter setting on the whole data and make the best_* "
+            "attributes available for that metric. If this is not needed, "
+            f"refit should be set to False explicitly. {self.refit!r} was "
+            "passed."
+        )
+
+        valid_refit_dict = isinstance(self.refit, str) and self.refit in scores
+
+        if (
+            self.refit is not False
+            and not valid_refit_dict
+            and not callable(self.refit)
+        ):
+            raise ValueError(multimetric_refit_msg)
+
+    @staticmethod
+    def _select_best_index(refit, refit_metric, results):
+        """Select index of the best combination of hyperparameters."""
+        if callable(refit):
+            # If callable, refit is expected to return the index of the best
+            # parameter set.
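+            # Illustrative sketch (editorial addition, not part of the
+            # original module): a valid `refit` callable receives the
+            # `cv_results_` dict and returns an integer index into
+            # ``results["params"]``.  For instance, a hypothetical callable
+            # reproducing the default single-metric behaviour could be
+            #
+            #     import numpy as np
+            #
+            #     def best_mean_test_score(cv_results):
+            #         # candidate with rank 1 on the test score
+            #         return int(np.argmin(cv_results["rank_test_score"]))
+            #
+            # passed as e.g. ``GridSearchCV(..., refit=best_mean_test_score)``.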
+ best_index = refit(results) + if not isinstance(best_index, numbers.Integral): + raise TypeError("best_index_ returned is not an integer") + if best_index < 0 or best_index >= len(results["params"]): + raise IndexError("best_index_ index out of range") + else: + best_index = results[f"rank_test_{refit_metric}"].argmin() + return best_index + + def _get_scorers(self, convert_multimetric): + """Get the scorer(s) to be used. + + This is used in ``fit`` and ``get_metadata_routing``. + + Parameters + ---------- + convert_multimetric : bool + Whether to convert a dict of scorers to a _MultimetricScorer. This + is used in ``get_metadata_routing`` to include the routing info for + multiple scorers. + + Returns + ------- + scorers, refit_metric + """ + refit_metric = "score" + + if callable(self.scoring): + scorers = self.scoring + elif self.scoring is None or isinstance(self.scoring, str): + scorers = check_scoring(self.estimator, self.scoring) + else: + scorers = _check_multimetric_scoring(self.estimator, self.scoring) + self._check_refit_for_multimetric(scorers) + refit_metric = self.refit + if convert_multimetric and isinstance(scorers, dict): + scorers = _MultimetricScorer( + scorers=scorers, raise_exc=(self.error_score == "raise") + ) + + return scorers, refit_metric + + def _get_routed_params_for_fit(self, params): + """Get the parameters to be used for routing. + + This is a method instead of a snippet in ``fit`` since it's used twice, + here in ``fit``, and in ``HalvingRandomSearchCV.fit``. + """ + if _routing_enabled(): + routed_params = process_routing(self, "fit", **params) + else: + params = params.copy() + groups = params.pop("groups", None) + routed_params = Bunch( + estimator=Bunch(fit=params), + splitter=Bunch(split={"groups": groups}), + scorer=Bunch(score={}), + ) + return routed_params + + @_fit_context( + # *SearchCV.estimator is not validated yet + prefer_skip_nested_validation=False + ) + def fit(self, X, y=None, **params): + """Run fit with all sets of parameters. + + Parameters + ---------- + + X : array-like of shape (n_samples, n_features) + Training vector, where `n_samples` is the number of samples and + `n_features` is the number of features. + + y : array-like of shape (n_samples, n_output) \ + or (n_samples,), default=None + Target relative to X for classification or regression; + None for unsupervised learning. + + **params : dict of str -> object + Parameters passed to the ``fit`` method of the estimator, the scorer, + and the CV splitter. + + If a fit parameter is an array-like whose length is equal to + `num_samples` then it will be split across CV groups along with `X` + and `y`. For example, the :term:`sample_weight` parameter is split + because `len(sample_weights) = len(X)`. + + Returns + ------- + self : object + Instance of fitted estimator. + """ + estimator = self.estimator + # Here we keep a dict of scorers as is, and only convert to a + # _MultimetricScorer at a later stage. 
Issue: + # https://github.com/scikit-learn/scikit-learn/issues/27001 + scorers, refit_metric = self._get_scorers(convert_multimetric=False) + + X, y = indexable(X, y) + params = _check_method_params(X, params=params) + + routed_params = self._get_routed_params_for_fit(params) + + cv_orig = check_cv(self.cv, y, classifier=is_classifier(estimator)) + n_splits = cv_orig.get_n_splits(X, y, **routed_params.splitter.split) + + base_estimator = clone(self.estimator) + + parallel = Parallel(n_jobs=self.n_jobs, pre_dispatch=self.pre_dispatch) + + fit_and_score_kwargs = dict( + scorer=scorers, + fit_params=routed_params.estimator.fit, + score_params=routed_params.scorer.score, + return_train_score=self.return_train_score, + return_n_test_samples=True, + return_times=True, + return_parameters=False, + error_score=self.error_score, + verbose=self.verbose, + ) + results = {} + with parallel: + all_candidate_params = [] + all_out = [] + all_more_results = defaultdict(list) + + def evaluate_candidates(candidate_params, cv=None, more_results=None): + cv = cv or cv_orig + candidate_params = list(candidate_params) + n_candidates = len(candidate_params) + + if self.verbose > 0: + print( + "Fitting {0} folds for each of {1} candidates," + " totalling {2} fits".format( + n_splits, n_candidates, n_candidates * n_splits + ) + ) + + out = parallel( + delayed(_fit_and_score)( + clone(base_estimator), + X, + y, + train=train, + test=test, + parameters=parameters, + split_progress=(split_idx, n_splits), + candidate_progress=(cand_idx, n_candidates), + **fit_and_score_kwargs, + ) + for (cand_idx, parameters), (split_idx, (train, test)) in product( + enumerate(candidate_params), + enumerate(cv.split(X, y, **routed_params.splitter.split)), + ) + ) + + if len(out) < 1: + raise ValueError( + "No fits were performed. " + "Was the CV iterator empty? " + "Were there no candidates?" + ) + elif len(out) != n_candidates * n_splits: + raise ValueError( + "cv.split and cv.get_n_splits returned " + "inconsistent results. Expected {} " + "splits, got {}".format(n_splits, len(out) // n_candidates) + ) + + _warn_or_raise_about_fit_failures(out, self.error_score) + + # For callable self.scoring, the return type is only know after + # calling. If the return type is a dictionary, the error scores + # can now be inserted with the correct key. The type checking + # of out will be done in `_insert_error_scores`. 
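+                # Illustrative sketch (editorial addition): a callable
+                # `scoring` that returns a dict could look like, for example,
+                #
+                #     from sklearn.metrics import accuracy_score, f1_score
+                #
+                #     def multi_scorer(estimator, X, y):
+                #         y_pred = estimator.predict(X)
+                #         return {
+                #             "acc": accuracy_score(y, y_pred),
+                #             "f1": f1_score(y, y_pred, average="macro"),
+                #         }
+                #
+                # in which case `refit` must be one of the returned keys
+                # (e.g. "acc"), a callable, or False.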
+ if callable(self.scoring): + _insert_error_scores(out, self.error_score) + + all_candidate_params.extend(candidate_params) + all_out.extend(out) + + if more_results is not None: + for key, value in more_results.items(): + all_more_results[key].extend(value) + + nonlocal results + results = self._format_results( + all_candidate_params, n_splits, all_out, all_more_results + ) + + return results + + self._run_search(evaluate_candidates) + + # multimetric is determined here because in the case of a callable + # self.scoring the return type is only known after calling + first_test_score = all_out[0]["test_scores"] + self.multimetric_ = isinstance(first_test_score, dict) + + # check refit_metric now for a callabe scorer that is multimetric + if callable(self.scoring) and self.multimetric_: + self._check_refit_for_multimetric(first_test_score) + refit_metric = self.refit + + # For multi-metric evaluation, store the best_index_, best_params_ and + # best_score_ iff refit is one of the scorer names + # In single metric evaluation, refit_metric is "score" + if self.refit or not self.multimetric_: + self.best_index_ = self._select_best_index( + self.refit, refit_metric, results + ) + if not callable(self.refit): + # With a non-custom callable, we can select the best score + # based on the best index + self.best_score_ = results[f"mean_test_{refit_metric}"][ + self.best_index_ + ] + self.best_params_ = results["params"][self.best_index_] + + if self.refit: + # here we clone the estimator as well as the parameters, since + # sometimes the parameters themselves might be estimators, e.g. + # when we search over different estimators in a pipeline. + # ref: https://github.com/scikit-learn/scikit-learn/pull/26786 + self.best_estimator_ = clone(base_estimator).set_params( + **clone(self.best_params_, safe=False) + ) + + refit_start_time = time.time() + if y is not None: + self.best_estimator_.fit(X, y, **routed_params.estimator.fit) + else: + self.best_estimator_.fit(X, **routed_params.estimator.fit) + refit_end_time = time.time() + self.refit_time_ = refit_end_time - refit_start_time + + if hasattr(self.best_estimator_, "feature_names_in_"): + self.feature_names_in_ = self.best_estimator_.feature_names_in_ + + # Store the only scorer not as a dict for single metric evaluation + self.scorer_ = scorers + + self.cv_results_ = results + self.n_splits_ = n_splits + + return self + + def _format_results(self, candidate_params, n_splits, out, more_results=None): + n_candidates = len(candidate_params) + out = _aggregate_score_dicts(out) + + results = dict(more_results or {}) + for key, val in results.items(): + # each value is a list (as per evaluate_candidate's convention) + # we convert it to an array for consistency with the other keys + results[key] = np.asarray(val) + + def _store(key_name, array, weights=None, splits=False, rank=False): + """A small helper to store the scores/times to the cv_results_""" + # When iterated first by splits, then by parameters + # We want `array` to have `n_candidates` rows and `n_splits` cols. 
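+            # Illustrative sketch (editorial addition): with n_candidates=2
+            # and n_splits=3 the reshape below yields
+            #
+            #     [[c0_s0, c0_s1, c0_s2],
+            #      [c1_s0, c1_s1, c1_s2]]
+            #
+            # so ``array[i, j]`` is the value for candidate i on split j, and
+            # the per-candidate means/stds computed below run along axis=1.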
+ array = np.array(array, dtype=np.float64).reshape(n_candidates, n_splits) + if splits: + for split_idx in range(n_splits): + # Uses closure to alter the results + results["split%d_%s" % (split_idx, key_name)] = array[:, split_idx] + + array_means = np.average(array, axis=1, weights=weights) + results["mean_%s" % key_name] = array_means + + if key_name.startswith(("train_", "test_")) and np.any( + ~np.isfinite(array_means) + ): + warnings.warn( + ( + f"One or more of the {key_name.split('_')[0]} scores " + f"are non-finite: {array_means}" + ), + category=UserWarning, + ) + + # Weighted std is not directly available in numpy + array_stds = np.sqrt( + np.average( + (array - array_means[:, np.newaxis]) ** 2, axis=1, weights=weights + ) + ) + results["std_%s" % key_name] = array_stds + + if rank: + # When the fit/scoring fails `array_means` contains NaNs, we + # will exclude them from the ranking process and consider them + # as tied with the worst performers. + if np.isnan(array_means).all(): + # All fit/scoring routines failed. + rank_result = np.ones_like(array_means, dtype=np.int32) + else: + min_array_means = np.nanmin(array_means) - 1 + array_means = np.nan_to_num(array_means, nan=min_array_means) + rank_result = rankdata(-array_means, method="min").astype( + np.int32, copy=False + ) + results["rank_%s" % key_name] = rank_result + + _store("fit_time", out["fit_time"]) + _store("score_time", out["score_time"]) + # Use one MaskedArray and mask all the places where the param is not + # applicable for that candidate. Use defaultdict as each candidate may + # not contain all the params + param_results = defaultdict( + partial( + MaskedArray, + np.empty( + n_candidates, + ), + mask=True, + dtype=object, + ) + ) + for cand_idx, params in enumerate(candidate_params): + for name, value in params.items(): + # An all masked empty array gets created for the key + # `"param_%s" % name` at the first occurrence of `name`. + # Setting the value at an index also unmasks that index + param_results["param_%s" % name][cand_idx] = value + + results.update(param_results) + # Store a list of param dicts at the key 'params' + results["params"] = candidate_params + + test_scores_dict = _normalize_score_results(out["test_scores"]) + if self.return_train_score: + train_scores_dict = _normalize_score_results(out["train_scores"]) + + for scorer_name in test_scores_dict: + # Computed the (weighted) mean and std for test scores alone + _store( + "test_%s" % scorer_name, + test_scores_dict[scorer_name], + splits=True, + rank=True, + weights=None, + ) + if self.return_train_score: + _store( + "train_%s" % scorer_name, + train_scores_dict[scorer_name], + splits=True, + ) + + return results + + def get_metadata_routing(self): + """Get metadata routing of this object. + + Please check :ref:`User Guide ` on how the routing + mechanism works. + + .. versionadded:: 1.4 + + Returns + ------- + routing : MetadataRouter + A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating + routing information. 
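+
+        Notes
+        -----
+        The following is an illustrative sketch (editorial addition, not an
+        official example); it assumes the public
+        ``sklearn.set_config(enable_metadata_routing=True)`` switch and the
+        scorer ``set_score_request`` API::
+
+            import sklearn
+            from sklearn.linear_model import LogisticRegression
+            from sklearn.metrics import accuracy_score, make_scorer
+            from sklearn.model_selection import GridSearchCV
+
+            sklearn.set_config(enable_metadata_routing=True)
+            weighted_acc = make_scorer(accuracy_score).set_score_request(
+                sample_weight=True
+            )
+            search = GridSearchCV(
+                LogisticRegression(), {"C": [0.1, 1.0]}, scoring=weighted_acc
+            )
+            # `sample_weight` passed to ``search.fit(X, y, sample_weight=w)``
+            # is then routed to the scorer according to the router returned
+            # by this method.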
+ """ + router = MetadataRouter(owner=self.__class__.__name__) + router.add( + estimator=self.estimator, + method_mapping=MethodMapping().add(caller="fit", callee="fit"), + ) + + scorer, _ = self._get_scorers(convert_multimetric=True) + router.add( + scorer=scorer, + method_mapping=MethodMapping() + .add(caller="score", callee="score") + .add(caller="fit", callee="score"), + ) + router.add( + splitter=self.cv, + method_mapping=MethodMapping().add(caller="fit", callee="split"), + ) + return router + + +class GridSearchCV(BaseSearchCV): + """Exhaustive search over specified parameter values for an estimator. + + Important members are fit, predict. + + GridSearchCV implements a "fit" and a "score" method. + It also implements "score_samples", "predict", "predict_proba", + "decision_function", "transform" and "inverse_transform" if they are + implemented in the estimator used. + + The parameters of the estimator used to apply these methods are optimized + by cross-validated grid-search over a parameter grid. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + estimator : estimator object + This is assumed to implement the scikit-learn estimator interface. + Either estimator needs to provide a ``score`` function, + or ``scoring`` must be passed. + + param_grid : dict or list of dictionaries + Dictionary with parameters names (`str`) as keys and lists of + parameter settings to try as values, or a list of such + dictionaries, in which case the grids spanned by each dictionary + in the list are explored. This enables searching over any sequence + of parameter settings. + + scoring : str, callable, list, tuple or dict, default=None + Strategy to evaluate the performance of the cross-validated model on + the test set. + + If `scoring` represents a single score, one can use: + + - a single string (see :ref:`scoring_parameter`); + - a callable (see :ref:`scoring`) that returns a single value. + + If `scoring` represents multiple scores, one can use: + + - a list or tuple of unique strings; + - a callable returning a dictionary where the keys are the metric + names and the values are the metric scores; + - a dictionary with metric names as keys and callables a values. + + See :ref:`multimetric_grid_search` for an example. + + n_jobs : int, default=None + Number of jobs to run in parallel. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + .. versionchanged:: v0.20 + `n_jobs` default changed from 1 to None + + refit : bool, str, or callable, default=True + Refit an estimator using the best found parameters on the whole + dataset. + + For multiple metric evaluation, this needs to be a `str` denoting the + scorer that would be used to find the best parameters for refitting + the estimator at the end. + + Where there are considerations other than maximum score in + choosing a best estimator, ``refit`` can be set to a function which + returns the selected ``best_index_`` given ``cv_results_``. In that + case, the ``best_estimator_`` and ``best_params_`` will be set + according to the returned ``best_index_`` while the ``best_score_`` + attribute will not be available. + + The refitted estimator is made available at the ``best_estimator_`` + attribute and permits using ``predict`` directly on this + ``GridSearchCV`` instance. 
+ + Also for multiple metric evaluation, the attributes ``best_index_``, + ``best_score_`` and ``best_params_`` will only be available if + ``refit`` is set and all of them will be determined w.r.t this specific + scorer. + + See ``scoring`` parameter to know more about multiple metric + evaluation. + + See :ref:`sphx_glr_auto_examples_model_selection_plot_grid_search_digits.py` + to see how to design a custom selection strategy using a callable + via `refit`. + + .. versionchanged:: 0.20 + Support for callable added. + + cv : int, cross-validation generator or an iterable, default=None + Determines the cross-validation splitting strategy. + Possible inputs for cv are: + + - None, to use the default 5-fold cross validation, + - integer, to specify the number of folds in a `(Stratified)KFold`, + - :term:`CV splitter`, + - An iterable yielding (train, test) splits as arrays of indices. + + For integer/None inputs, if the estimator is a classifier and ``y`` is + either binary or multiclass, :class:`StratifiedKFold` is used. In all + other cases, :class:`KFold` is used. These splitters are instantiated + with `shuffle=False` so the splits will be the same across calls. + + Refer :ref:`User Guide ` for the various + cross-validation strategies that can be used here. + + .. versionchanged:: 0.22 + ``cv`` default value if None changed from 3-fold to 5-fold. + + verbose : int + Controls the verbosity: the higher, the more messages. + + - >1 : the computation time for each fold and parameter candidate is + displayed; + - >2 : the score is also displayed; + - >3 : the fold and candidate parameter indexes are also displayed + together with the starting time of the computation. + + pre_dispatch : int, or str, default='2*n_jobs' + Controls the number of jobs that get dispatched during parallel + execution. Reducing this number can be useful to avoid an + explosion of memory consumption when more jobs get dispatched + than CPUs can process. This parameter can be: + + - None, in which case all the jobs are immediately + created and spawned. Use this for lightweight and + fast-running jobs, to avoid delays due to on-demand + spawning of the jobs + + - An int, giving the exact number of total jobs that are + spawned + + - A str, giving an expression as a function of n_jobs, + as in '2*n_jobs' + + error_score : 'raise' or numeric, default=np.nan + Value to assign to the score if an error occurs in estimator fitting. + If set to 'raise', the error is raised. If a numeric value is given, + FitFailedWarning is raised. This parameter does not affect the refit + step, which will always raise the error. + + return_train_score : bool, default=False + If ``False``, the ``cv_results_`` attribute will not include training + scores. + Computing training scores is used to get insights on how different + parameter settings impact the overfitting/underfitting trade-off. + However computing the scores on the training set can be computationally + expensive and is not strictly required to select the parameters that + yield the best generalization performance. + + .. versionadded:: 0.19 + + .. versionchanged:: 0.21 + Default value was changed from ``True`` to ``False`` + + Attributes + ---------- + cv_results_ : dict of numpy (masked) ndarrays + A dict with keys as column headers and values as columns, that can be + imported into a pandas ``DataFrame``. 
+ + For instance the below given table + + +------------+-----------+------------+-----------------+---+---------+ + |param_kernel|param_gamma|param_degree|split0_test_score|...|rank_t...| + +============+===========+============+=================+===+=========+ + | 'poly' | -- | 2 | 0.80 |...| 2 | + +------------+-----------+------------+-----------------+---+---------+ + | 'poly' | -- | 3 | 0.70 |...| 4 | + +------------+-----------+------------+-----------------+---+---------+ + | 'rbf' | 0.1 | -- | 0.80 |...| 3 | + +------------+-----------+------------+-----------------+---+---------+ + | 'rbf' | 0.2 | -- | 0.93 |...| 1 | + +------------+-----------+------------+-----------------+---+---------+ + + will be represented by a ``cv_results_`` dict of:: + + { + 'param_kernel': masked_array(data = ['poly', 'poly', 'rbf', 'rbf'], + mask = [False False False False]...) + 'param_gamma': masked_array(data = [-- -- 0.1 0.2], + mask = [ True True False False]...), + 'param_degree': masked_array(data = [2.0 3.0 -- --], + mask = [False False True True]...), + 'split0_test_score' : [0.80, 0.70, 0.80, 0.93], + 'split1_test_score' : [0.82, 0.50, 0.70, 0.78], + 'mean_test_score' : [0.81, 0.60, 0.75, 0.85], + 'std_test_score' : [0.01, 0.10, 0.05, 0.08], + 'rank_test_score' : [2, 4, 3, 1], + 'split0_train_score' : [0.80, 0.92, 0.70, 0.93], + 'split1_train_score' : [0.82, 0.55, 0.70, 0.87], + 'mean_train_score' : [0.81, 0.74, 0.70, 0.90], + 'std_train_score' : [0.01, 0.19, 0.00, 0.03], + 'mean_fit_time' : [0.73, 0.63, 0.43, 0.49], + 'std_fit_time' : [0.01, 0.02, 0.01, 0.01], + 'mean_score_time' : [0.01, 0.06, 0.04, 0.04], + 'std_score_time' : [0.00, 0.00, 0.00, 0.01], + 'params' : [{'kernel': 'poly', 'degree': 2}, ...], + } + + NOTE + + The key ``'params'`` is used to store a list of parameter + settings dicts for all the parameter candidates. + + The ``mean_fit_time``, ``std_fit_time``, ``mean_score_time`` and + ``std_score_time`` are all in seconds. + + For multi-metric evaluation, the scores for all the scorers are + available in the ``cv_results_`` dict at the keys ending with that + scorer's name (``'_'``) instead of ``'_score'`` shown + above. ('split0_test_precision', 'mean_train_precision' etc.) + + best_estimator_ : estimator + Estimator that was chosen by the search, i.e. estimator + which gave highest score (or smallest loss if specified) + on the left out data. Not available if ``refit=False``. + + See ``refit`` parameter for more information on allowed values. + + best_score_ : float + Mean cross-validated score of the best_estimator + + For multi-metric evaluation, this is present only if ``refit`` is + specified. + + This attribute is not available if ``refit`` is a function. + + best_params_ : dict + Parameter setting that gave the best results on the hold out data. + + For multi-metric evaluation, this is present only if ``refit`` is + specified. + + best_index_ : int + The index (of the ``cv_results_`` arrays) which corresponds to the best + candidate parameter setting. + + The dict at ``search.cv_results_['params'][search.best_index_]`` gives + the parameter setting for the best model, that gives the highest + mean score (``search.best_score_``). + + For multi-metric evaluation, this is present only if ``refit`` is + specified. + + scorer_ : function or a dict + Scorer function used on the held out data to choose the best + parameters for the model. + + For multi-metric evaluation, this attribute holds the validated + ``scoring`` dict which maps the scorer key to the scorer callable. 
+ + n_splits_ : int + The number of cross-validation splits (folds/iterations). + + refit_time_ : float + Seconds used for refitting the best model on the whole dataset. + + This is present only if ``refit`` is not False. + + .. versionadded:: 0.20 + + multimetric_ : bool + Whether or not the scorers compute several metrics. + + classes_ : ndarray of shape (n_classes,) + The classes labels. This is present only if ``refit`` is specified and + the underlying estimator is a classifier. + + n_features_in_ : int + Number of features seen during :term:`fit`. Only defined if + `best_estimator_` is defined (see the documentation for the `refit` + parameter for more details) and that `best_estimator_` exposes + `n_features_in_` when fit. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Only defined if + `best_estimator_` is defined (see the documentation for the `refit` + parameter for more details) and that `best_estimator_` exposes + `feature_names_in_` when fit. + + .. versionadded:: 1.0 + + See Also + -------- + ParameterGrid : Generates all the combinations of a hyperparameter grid. + train_test_split : Utility function to split the data into a development + set usable for fitting a GridSearchCV instance and an evaluation set + for its final evaluation. + sklearn.metrics.make_scorer : Make a scorer from a performance metric or + loss function. + + Notes + ----- + The parameters selected are those that maximize the score of the left out + data, unless an explicit score is passed in which case it is used instead. + + If `n_jobs` was set to a value higher than one, the data is copied for each + point in the grid (and not `n_jobs` times). This is done for efficiency + reasons if individual jobs take very little time, but may raise errors if + the dataset is large and not enough memory is available. A workaround in + this case is to set `pre_dispatch`. Then, the memory is copied only + `pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 * + n_jobs`. + + Examples + -------- + >>> from sklearn import svm, datasets + >>> from sklearn.model_selection import GridSearchCV + >>> iris = datasets.load_iris() + >>> parameters = {'kernel':('linear', 'rbf'), 'C':[1, 10]} + >>> svc = svm.SVC() + >>> clf = GridSearchCV(svc, parameters) + >>> clf.fit(iris.data, iris.target) + GridSearchCV(estimator=SVC(), + param_grid={'C': [1, 10], 'kernel': ('linear', 'rbf')}) + >>> sorted(clf.cv_results_.keys()) + ['mean_fit_time', 'mean_score_time', 'mean_test_score',... + 'param_C', 'param_kernel', 'params',... + 'rank_test_score', 'split0_test_score',... + 'split2_test_score', ... 
+ 'std_fit_time', 'std_score_time', 'std_test_score'] + """ + + _required_parameters = ["estimator", "param_grid"] + + _parameter_constraints: dict = { + **BaseSearchCV._parameter_constraints, + "param_grid": [dict, list], + } + + def __init__( + self, + estimator, + param_grid, + *, + scoring=None, + n_jobs=None, + refit=True, + cv=None, + verbose=0, + pre_dispatch="2*n_jobs", + error_score=np.nan, + return_train_score=False, + ): + super().__init__( + estimator=estimator, + scoring=scoring, + n_jobs=n_jobs, + refit=refit, + cv=cv, + verbose=verbose, + pre_dispatch=pre_dispatch, + error_score=error_score, + return_train_score=return_train_score, + ) + self.param_grid = param_grid + + def _run_search(self, evaluate_candidates): + """Search all candidates in param_grid""" + evaluate_candidates(ParameterGrid(self.param_grid)) + + +class RandomizedSearchCV(BaseSearchCV): + """Randomized search on hyper parameters. + + RandomizedSearchCV implements a "fit" and a "score" method. + It also implements "score_samples", "predict", "predict_proba", + "decision_function", "transform" and "inverse_transform" if they are + implemented in the estimator used. + + The parameters of the estimator used to apply these methods are optimized + by cross-validated search over parameter settings. + + In contrast to GridSearchCV, not all parameter values are tried out, but + rather a fixed number of parameter settings is sampled from the specified + distributions. The number of parameter settings that are tried is + given by n_iter. + + If all parameters are presented as a list, + sampling without replacement is performed. If at least one parameter + is given as a distribution, sampling with replacement is used. + It is highly recommended to use continuous distributions for continuous + parameters. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.14 + + Parameters + ---------- + estimator : estimator object + An object of that type is instantiated for each grid point. + This is assumed to implement the scikit-learn estimator interface. + Either estimator needs to provide a ``score`` function, + or ``scoring`` must be passed. + + param_distributions : dict or list of dicts + Dictionary with parameters names (`str`) as keys and distributions + or lists of parameters to try. Distributions must provide a ``rvs`` + method for sampling (such as those from scipy.stats.distributions). + If a list is given, it is sampled uniformly. + If a list of dicts is given, first a dict is sampled uniformly, and + then a parameter is sampled using that dict as above. + + n_iter : int, default=10 + Number of parameter settings that are sampled. n_iter trades + off runtime vs quality of the solution. + + scoring : str, callable, list, tuple or dict, default=None + Strategy to evaluate the performance of the cross-validated model on + the test set. + + If `scoring` represents a single score, one can use: + + - a single string (see :ref:`scoring_parameter`); + - a callable (see :ref:`scoring`) that returns a single value. + + If `scoring` represents multiple scores, one can use: + + - a list or tuple of unique strings; + - a callable returning a dictionary where the keys are the metric + names and the values are the metric scores; + - a dictionary with metric names as keys and callables a values. + + See :ref:`multimetric_grid_search` for an example. + + If None, the estimator's score method is used. + + n_jobs : int, default=None + Number of jobs to run in parallel. 
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + .. versionchanged:: v0.20 + `n_jobs` default changed from 1 to None + + refit : bool, str, or callable, default=True + Refit an estimator using the best found parameters on the whole + dataset. + + For multiple metric evaluation, this needs to be a `str` denoting the + scorer that would be used to find the best parameters for refitting + the estimator at the end. + + Where there are considerations other than maximum score in + choosing a best estimator, ``refit`` can be set to a function which + returns the selected ``best_index_`` given the ``cv_results``. In that + case, the ``best_estimator_`` and ``best_params_`` will be set + according to the returned ``best_index_`` while the ``best_score_`` + attribute will not be available. + + The refitted estimator is made available at the ``best_estimator_`` + attribute and permits using ``predict`` directly on this + ``RandomizedSearchCV`` instance. + + Also for multiple metric evaluation, the attributes ``best_index_``, + ``best_score_`` and ``best_params_`` will only be available if + ``refit`` is set and all of them will be determined w.r.t this specific + scorer. + + See ``scoring`` parameter to know more about multiple metric + evaluation. + + .. versionchanged:: 0.20 + Support for callable added. + + cv : int, cross-validation generator or an iterable, default=None + Determines the cross-validation splitting strategy. + Possible inputs for cv are: + + - None, to use the default 5-fold cross validation, + - integer, to specify the number of folds in a `(Stratified)KFold`, + - :term:`CV splitter`, + - An iterable yielding (train, test) splits as arrays of indices. + + For integer/None inputs, if the estimator is a classifier and ``y`` is + either binary or multiclass, :class:`StratifiedKFold` is used. In all + other cases, :class:`KFold` is used. These splitters are instantiated + with `shuffle=False` so the splits will be the same across calls. + + Refer :ref:`User Guide ` for the various + cross-validation strategies that can be used here. + + .. versionchanged:: 0.22 + ``cv`` default value if None changed from 3-fold to 5-fold. + + verbose : int + Controls the verbosity: the higher, the more messages. + + - >1 : the computation time for each fold and parameter candidate is + displayed; + - >2 : the score is also displayed; + - >3 : the fold and candidate parameter indexes are also displayed + together with the starting time of the computation. + + pre_dispatch : int, or str, default='2*n_jobs' + Controls the number of jobs that get dispatched during parallel + execution. Reducing this number can be useful to avoid an + explosion of memory consumption when more jobs get dispatched + than CPUs can process. This parameter can be: + + - None, in which case all the jobs are immediately + created and spawned. Use this for lightweight and + fast-running jobs, to avoid delays due to on-demand + spawning of the jobs + + - An int, giving the exact number of total jobs that are + spawned + + - A str, giving an expression as a function of n_jobs, + as in '2*n_jobs' + + random_state : int, RandomState instance or None, default=None + Pseudo random number generator state used for random uniform sampling + from lists of possible values instead of scipy.stats distributions. + Pass an int for reproducible output across multiple + function calls. + See :term:`Glossary `. 
+ + error_score : 'raise' or numeric, default=np.nan + Value to assign to the score if an error occurs in estimator fitting. + If set to 'raise', the error is raised. If a numeric value is given, + FitFailedWarning is raised. This parameter does not affect the refit + step, which will always raise the error. + + return_train_score : bool, default=False + If ``False``, the ``cv_results_`` attribute will not include training + scores. + Computing training scores is used to get insights on how different + parameter settings impact the overfitting/underfitting trade-off. + However computing the scores on the training set can be computationally + expensive and is not strictly required to select the parameters that + yield the best generalization performance. + + .. versionadded:: 0.19 + + .. versionchanged:: 0.21 + Default value was changed from ``True`` to ``False`` + + Attributes + ---------- + cv_results_ : dict of numpy (masked) ndarrays + A dict with keys as column headers and values as columns, that can be + imported into a pandas ``DataFrame``. + + For instance the below given table + + +--------------+-------------+-------------------+---+---------------+ + | param_kernel | param_gamma | split0_test_score |...|rank_test_score| + +==============+=============+===================+===+===============+ + | 'rbf' | 0.1 | 0.80 |...| 1 | + +--------------+-------------+-------------------+---+---------------+ + | 'rbf' | 0.2 | 0.84 |...| 3 | + +--------------+-------------+-------------------+---+---------------+ + | 'rbf' | 0.3 | 0.70 |...| 2 | + +--------------+-------------+-------------------+---+---------------+ + + will be represented by a ``cv_results_`` dict of:: + + { + 'param_kernel' : masked_array(data = ['rbf', 'rbf', 'rbf'], + mask = False), + 'param_gamma' : masked_array(data = [0.1 0.2 0.3], mask = False), + 'split0_test_score' : [0.80, 0.84, 0.70], + 'split1_test_score' : [0.82, 0.50, 0.70], + 'mean_test_score' : [0.81, 0.67, 0.70], + 'std_test_score' : [0.01, 0.24, 0.00], + 'rank_test_score' : [1, 3, 2], + 'split0_train_score' : [0.80, 0.92, 0.70], + 'split1_train_score' : [0.82, 0.55, 0.70], + 'mean_train_score' : [0.81, 0.74, 0.70], + 'std_train_score' : [0.01, 0.19, 0.00], + 'mean_fit_time' : [0.73, 0.63, 0.43], + 'std_fit_time' : [0.01, 0.02, 0.01], + 'mean_score_time' : [0.01, 0.06, 0.04], + 'std_score_time' : [0.00, 0.00, 0.00], + 'params' : [{'kernel' : 'rbf', 'gamma' : 0.1}, ...], + } + + NOTE + + The key ``'params'`` is used to store a list of parameter + settings dicts for all the parameter candidates. + + The ``mean_fit_time``, ``std_fit_time``, ``mean_score_time`` and + ``std_score_time`` are all in seconds. + + For multi-metric evaluation, the scores for all the scorers are + available in the ``cv_results_`` dict at the keys ending with that + scorer's name (``'_'``) instead of ``'_score'`` shown + above. ('split0_test_precision', 'mean_train_precision' etc.) + + best_estimator_ : estimator + Estimator that was chosen by the search, i.e. estimator + which gave highest score (or smallest loss if specified) + on the left out data. Not available if ``refit=False``. + + For multi-metric evaluation, this attribute is present only if + ``refit`` is specified. + + See ``refit`` parameter for more information on allowed values. + + best_score_ : float + Mean cross-validated score of the best_estimator. + + For multi-metric evaluation, this is not available if ``refit`` is + ``False``. See ``refit`` parameter for more information. 
+ + This attribute is not available if ``refit`` is a function. + + best_params_ : dict + Parameter setting that gave the best results on the hold out data. + + For multi-metric evaluation, this is not available if ``refit`` is + ``False``. See ``refit`` parameter for more information. + + best_index_ : int + The index (of the ``cv_results_`` arrays) which corresponds to the best + candidate parameter setting. + + The dict at ``search.cv_results_['params'][search.best_index_]`` gives + the parameter setting for the best model, that gives the highest + mean score (``search.best_score_``). + + For multi-metric evaluation, this is not available if ``refit`` is + ``False``. See ``refit`` parameter for more information. + + scorer_ : function or a dict + Scorer function used on the held out data to choose the best + parameters for the model. + + For multi-metric evaluation, this attribute holds the validated + ``scoring`` dict which maps the scorer key to the scorer callable. + + n_splits_ : int + The number of cross-validation splits (folds/iterations). + + refit_time_ : float + Seconds used for refitting the best model on the whole dataset. + + This is present only if ``refit`` is not False. + + .. versionadded:: 0.20 + + multimetric_ : bool + Whether or not the scorers compute several metrics. + + classes_ : ndarray of shape (n_classes,) + The classes labels. This is present only if ``refit`` is specified and + the underlying estimator is a classifier. + + n_features_in_ : int + Number of features seen during :term:`fit`. Only defined if + `best_estimator_` is defined (see the documentation for the `refit` + parameter for more details) and that `best_estimator_` exposes + `n_features_in_` when fit. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Only defined if + `best_estimator_` is defined (see the documentation for the `refit` + parameter for more details) and that `best_estimator_` exposes + `feature_names_in_` when fit. + + .. versionadded:: 1.0 + + See Also + -------- + GridSearchCV : Does exhaustive search over a grid of parameters. + ParameterSampler : A generator over parameter settings, constructed from + param_distributions. + + Notes + ----- + The parameters selected are those that maximize the score of the held-out + data, according to the scoring parameter. + + If `n_jobs` was set to a value higher than one, the data is copied for each + parameter setting(and not `n_jobs` times). This is done for efficiency + reasons if individual jobs take very little time, but may raise errors if + the dataset is large and not enough memory is available. A workaround in + this case is to set `pre_dispatch`. Then, the memory is copied only + `pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 * + n_jobs`. + + Examples + -------- + >>> from sklearn.datasets import load_iris + >>> from sklearn.linear_model import LogisticRegression + >>> from sklearn.model_selection import RandomizedSearchCV + >>> from scipy.stats import uniform + >>> iris = load_iris() + >>> logistic = LogisticRegression(solver='saga', tol=1e-2, max_iter=200, + ... random_state=0) + >>> distributions = dict(C=uniform(loc=0, scale=4), + ... 
penalty=['l2', 'l1']) + >>> clf = RandomizedSearchCV(logistic, distributions, random_state=0) + >>> search = clf.fit(iris.data, iris.target) + >>> search.best_params_ + {'C': 2..., 'penalty': 'l1'} + """ + + _required_parameters = ["estimator", "param_distributions"] + + _parameter_constraints: dict = { + **BaseSearchCV._parameter_constraints, + "param_distributions": [dict, list], + "n_iter": [Interval(numbers.Integral, 1, None, closed="left")], + "random_state": ["random_state"], + } + + def __init__( + self, + estimator, + param_distributions, + *, + n_iter=10, + scoring=None, + n_jobs=None, + refit=True, + cv=None, + verbose=0, + pre_dispatch="2*n_jobs", + random_state=None, + error_score=np.nan, + return_train_score=False, + ): + self.param_distributions = param_distributions + self.n_iter = n_iter + self.random_state = random_state + super().__init__( + estimator=estimator, + scoring=scoring, + n_jobs=n_jobs, + refit=refit, + cv=cv, + verbose=verbose, + pre_dispatch=pre_dispatch, + error_score=error_score, + return_train_score=return_train_score, + ) + + def _run_search(self, evaluate_candidates): + """Search n_iter candidates from param_distributions""" + evaluate_candidates( + ParameterSampler( + self.param_distributions, self.n_iter, random_state=self.random_state + ) + ) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/_split.py b/env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/_split.py new file mode 100644 index 0000000000000000000000000000000000000000..1f89832daba227163f0639268deeebd7a26cae62 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/_split.py @@ -0,0 +1,2794 @@ +""" +The :mod:`sklearn.model_selection._split` module includes classes and +functions to split the data based on a preset strategy. +""" + +# Author: Alexandre Gramfort +# Gael Varoquaux +# Olivier Grisel +# Raghav RV +# Leandro Hermida +# Rodion Martynov +# License: BSD 3 clause + +import numbers +import warnings +from abc import ABCMeta, abstractmethod +from collections import defaultdict +from collections.abc import Iterable +from inspect import signature +from itertools import chain, combinations +from math import ceil, floor + +import numpy as np +from scipy.special import comb + +from ..utils import ( + _approximate_mode, + _safe_indexing, + check_random_state, + indexable, + metadata_routing, +) +from ..utils._param_validation import Interval, RealNotInt, validate_params +from ..utils.metadata_routing import _MetadataRequester +from ..utils.multiclass import type_of_target +from ..utils.validation import _num_samples, check_array, column_or_1d + +__all__ = [ + "BaseCrossValidator", + "KFold", + "GroupKFold", + "LeaveOneGroupOut", + "LeaveOneOut", + "LeavePGroupsOut", + "LeavePOut", + "RepeatedStratifiedKFold", + "RepeatedKFold", + "ShuffleSplit", + "GroupShuffleSplit", + "StratifiedKFold", + "StratifiedGroupKFold", + "StratifiedShuffleSplit", + "PredefinedSplit", + "train_test_split", + "check_cv", +] + + +class GroupsConsumerMixin(_MetadataRequester): + """A Mixin to ``groups`` by default. + + This Mixin makes the object to request ``groups`` by default as ``True``. + + .. versionadded:: 1.3 + """ + + __metadata_request__split = {"groups": True} + + +class BaseCrossValidator(_MetadataRequester, metaclass=ABCMeta): + """Base class for all cross-validators. + + Implementations must define `_iter_test_masks` or `_iter_test_indices`. 
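+
+    As an illustrative sketch (editorial addition, not an official example),
+    a minimal subclass that always tests on the last ``p`` samples could be
+    written as::
+
+        class LastPOut(BaseCrossValidator):
+            def __init__(self, p=1):
+                self.p = p
+
+            def _iter_test_indices(self, X, y=None, groups=None):
+                n_samples = _num_samples(X)
+                yield np.arange(n_samples - self.p, n_samples)
+
+            def get_n_splits(self, X=None, y=None, groups=None):
+                return 1
+
+    ``LastPOut`` is a hypothetical name used only for illustration; the
+    ``split`` method inherited from this base class then yields a single
+    (train, test) pair built from the yielded test indices.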
+ """ + + # This indicates that by default CV splitters don't have a "groups" kwarg, + # unless indicated by inheriting from ``GroupsConsumerMixin``. + # This also prevents ``set_split_request`` to be generated for splitters + # which don't support ``groups``. + __metadata_request__split = {"groups": metadata_routing.UNUSED} + + def split(self, X, y=None, groups=None): + """Generate indices to split data into training and test set. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : array-like of shape (n_samples,) + The target variable for supervised learning problems. + + groups : array-like of shape (n_samples,), default=None + Group labels for the samples used while splitting the dataset into + train/test set. + + Yields + ------ + train : ndarray + The training set indices for that split. + + test : ndarray + The testing set indices for that split. + """ + X, y, groups = indexable(X, y, groups) + indices = np.arange(_num_samples(X)) + for test_index in self._iter_test_masks(X, y, groups): + train_index = indices[np.logical_not(test_index)] + test_index = indices[test_index] + yield train_index, test_index + + # Since subclasses must implement either _iter_test_masks or + # _iter_test_indices, neither can be abstract. + def _iter_test_masks(self, X=None, y=None, groups=None): + """Generates boolean masks corresponding to test sets. + + By default, delegates to _iter_test_indices(X, y, groups) + """ + for test_index in self._iter_test_indices(X, y, groups): + test_mask = np.zeros(_num_samples(X), dtype=bool) + test_mask[test_index] = True + yield test_mask + + def _iter_test_indices(self, X=None, y=None, groups=None): + """Generates integer indices corresponding to test sets.""" + raise NotImplementedError + + @abstractmethod + def get_n_splits(self, X=None, y=None, groups=None): + """Returns the number of splitting iterations in the cross-validator.""" + + def __repr__(self): + return _build_repr(self) + + +class LeaveOneOut(BaseCrossValidator): + """Leave-One-Out cross-validator. + + Provides train/test indices to split data in train/test sets. Each + sample is used once as a test set (singleton) while the remaining + samples form the training set. + + Note: ``LeaveOneOut()`` is equivalent to ``KFold(n_splits=n)`` and + ``LeavePOut(p=1)`` where ``n`` is the number of samples. + + Due to the high number of test sets (which is the same as the + number of samples) this cross-validation method can be very costly. + For large datasets one should favor :class:`KFold`, :class:`ShuffleSplit` + or :class:`StratifiedKFold`. + + Read more in the :ref:`User Guide `. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.model_selection import LeaveOneOut + >>> X = np.array([[1, 2], [3, 4]]) + >>> y = np.array([1, 2]) + >>> loo = LeaveOneOut() + >>> loo.get_n_splits(X) + 2 + >>> print(loo) + LeaveOneOut() + >>> for i, (train_index, test_index) in enumerate(loo.split(X)): + ... print(f"Fold {i}:") + ... print(f" Train: index={train_index}") + ... print(f" Test: index={test_index}") + Fold 0: + Train: index=[1] + Test: index=[0] + Fold 1: + Train: index=[0] + Test: index=[1] + + See Also + -------- + LeaveOneGroupOut : For splitting the data according to explicit, + domain-specific stratification of the dataset. + GroupKFold : K-fold iterator variant with non-overlapping groups. 
+ """ + + def _iter_test_indices(self, X, y=None, groups=None): + n_samples = _num_samples(X) + if n_samples <= 1: + raise ValueError( + "Cannot perform LeaveOneOut with n_samples={}.".format(n_samples) + ) + return range(n_samples) + + def get_n_splits(self, X, y=None, groups=None): + """Returns the number of splitting iterations in the cross-validator. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : object + Always ignored, exists for compatibility. + + groups : object + Always ignored, exists for compatibility. + + Returns + ------- + n_splits : int + Returns the number of splitting iterations in the cross-validator. + """ + if X is None: + raise ValueError("The 'X' parameter should not be None.") + return _num_samples(X) + + +class LeavePOut(BaseCrossValidator): + """Leave-P-Out cross-validator. + + Provides train/test indices to split data in train/test sets. This results + in testing on all distinct samples of size p, while the remaining n - p + samples form the training set in each iteration. + + Note: ``LeavePOut(p)`` is NOT equivalent to + ``KFold(n_splits=n_samples // p)`` which creates non-overlapping test sets. + + Due to the high number of iterations which grows combinatorically with the + number of samples this cross-validation method can be very costly. For + large datasets one should favor :class:`KFold`, :class:`StratifiedKFold` + or :class:`ShuffleSplit`. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + p : int + Size of the test sets. Must be strictly less than the number of + samples. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.model_selection import LeavePOut + >>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]]) + >>> y = np.array([1, 2, 3, 4]) + >>> lpo = LeavePOut(2) + >>> lpo.get_n_splits(X) + 6 + >>> print(lpo) + LeavePOut(p=2) + >>> for i, (train_index, test_index) in enumerate(lpo.split(X)): + ... print(f"Fold {i}:") + ... print(f" Train: index={train_index}") + ... print(f" Test: index={test_index}") + Fold 0: + Train: index=[2 3] + Test: index=[0 1] + Fold 1: + Train: index=[1 3] + Test: index=[0 2] + Fold 2: + Train: index=[1 2] + Test: index=[0 3] + Fold 3: + Train: index=[0 3] + Test: index=[1 2] + Fold 4: + Train: index=[0 2] + Test: index=[1 3] + Fold 5: + Train: index=[0 1] + Test: index=[2 3] + """ + + def __init__(self, p): + self.p = p + + def _iter_test_indices(self, X, y=None, groups=None): + n_samples = _num_samples(X) + if n_samples <= self.p: + raise ValueError( + "p={} must be strictly less than the number of samples={}".format( + self.p, n_samples + ) + ) + for combination in combinations(range(n_samples), self.p): + yield np.array(combination) + + def get_n_splits(self, X, y=None, groups=None): + """Returns the number of splitting iterations in the cross-validator. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : object + Always ignored, exists for compatibility. + + groups : object + Always ignored, exists for compatibility. 
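+
+        Returns
+        -------
+        n_splits : int
+            The number of splitting iterations, computed as
+            ``comb(n_samples, p)``; for instance 4 samples with ``p=2`` give
+            ``comb(4, 2) == 6`` splits, as in the class example above.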
+ """ + if X is None: + raise ValueError("The 'X' parameter should not be None.") + return int(comb(_num_samples(X), self.p, exact=True)) + + +class _BaseKFold(BaseCrossValidator, metaclass=ABCMeta): + """Base class for K-Fold cross-validators and TimeSeriesSplit.""" + + @abstractmethod + def __init__(self, n_splits, *, shuffle, random_state): + if not isinstance(n_splits, numbers.Integral): + raise ValueError( + "The number of folds must be of Integral type. " + "%s of type %s was passed." % (n_splits, type(n_splits)) + ) + n_splits = int(n_splits) + + if n_splits <= 1: + raise ValueError( + "k-fold cross-validation requires at least one" + " train/test split by setting n_splits=2 or more," + " got n_splits={0}.".format(n_splits) + ) + + if not isinstance(shuffle, bool): + raise TypeError("shuffle must be True or False; got {0}".format(shuffle)) + + if not shuffle and random_state is not None: # None is the default + raise ValueError( + ( + "Setting a random_state has no effect since shuffle is " + "False. You should leave " + "random_state to its default (None), or set shuffle=True." + ), + ) + + self.n_splits = n_splits + self.shuffle = shuffle + self.random_state = random_state + + def split(self, X, y=None, groups=None): + """Generate indices to split data into training and test set. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : array-like of shape (n_samples,), default=None + The target variable for supervised learning problems. + + groups : array-like of shape (n_samples,), default=None + Group labels for the samples used while splitting the dataset into + train/test set. + + Yields + ------ + train : ndarray + The training set indices for that split. + + test : ndarray + The testing set indices for that split. + """ + X, y, groups = indexable(X, y, groups) + n_samples = _num_samples(X) + if self.n_splits > n_samples: + raise ValueError( + ( + "Cannot have number of splits n_splits={0} greater" + " than the number of samples: n_samples={1}." + ).format(self.n_splits, n_samples) + ) + + for train, test in super().split(X, y, groups): + yield train, test + + def get_n_splits(self, X=None, y=None, groups=None): + """Returns the number of splitting iterations in the cross-validator. + + Parameters + ---------- + X : object + Always ignored, exists for compatibility. + + y : object + Always ignored, exists for compatibility. + + groups : object + Always ignored, exists for compatibility. + + Returns + ------- + n_splits : int + Returns the number of splitting iterations in the cross-validator. + """ + return self.n_splits + + +class KFold(_BaseKFold): + """K-Fold cross-validator. + + Provides train/test indices to split data in train/test sets. Split + dataset into k consecutive folds (without shuffling by default). + + Each fold is then used once as a validation while the k - 1 remaining + folds form the training set. + + Read more in the :ref:`User Guide `. + + For visualisation of cross-validation behaviour and + comparison between common scikit-learn split methods + refer to :ref:`sphx_glr_auto_examples_model_selection_plot_cv_indices.py` + + Parameters + ---------- + n_splits : int, default=5 + Number of folds. Must be at least 2. + + .. versionchanged:: 0.22 + ``n_splits`` default value changed from 3 to 5. + + shuffle : bool, default=False + Whether to shuffle the data before splitting into batches. 
+ Note that the samples within each split will not be shuffled. + + random_state : int, RandomState instance or None, default=None + When `shuffle` is True, `random_state` affects the ordering of the + indices, which controls the randomness of each fold. Otherwise, this + parameter has no effect. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.model_selection import KFold + >>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]]) + >>> y = np.array([1, 2, 3, 4]) + >>> kf = KFold(n_splits=2) + >>> kf.get_n_splits(X) + 2 + >>> print(kf) + KFold(n_splits=2, random_state=None, shuffle=False) + >>> for i, (train_index, test_index) in enumerate(kf.split(X)): + ... print(f"Fold {i}:") + ... print(f" Train: index={train_index}") + ... print(f" Test: index={test_index}") + Fold 0: + Train: index=[2 3] + Test: index=[0 1] + Fold 1: + Train: index=[0 1] + Test: index=[2 3] + + Notes + ----- + The first ``n_samples % n_splits`` folds have size + ``n_samples // n_splits + 1``, other folds have size + ``n_samples // n_splits``, where ``n_samples`` is the number of samples. + + Randomized CV splitters may return different results for each call of + split. You can make the results identical by setting `random_state` + to an integer. + + See Also + -------- + StratifiedKFold : Takes class information into account to avoid building + folds with imbalanced class distributions (for binary or multiclass + classification tasks). + + GroupKFold : K-fold iterator variant with non-overlapping groups. + + RepeatedKFold : Repeats K-Fold n times. + """ + + def __init__(self, n_splits=5, *, shuffle=False, random_state=None): + super().__init__(n_splits=n_splits, shuffle=shuffle, random_state=random_state) + + def _iter_test_indices(self, X, y=None, groups=None): + n_samples = _num_samples(X) + indices = np.arange(n_samples) + if self.shuffle: + check_random_state(self.random_state).shuffle(indices) + + n_splits = self.n_splits + fold_sizes = np.full(n_splits, n_samples // n_splits, dtype=int) + fold_sizes[: n_samples % n_splits] += 1 + current = 0 + for fold_size in fold_sizes: + start, stop = current, current + fold_size + yield indices[start:stop] + current = stop + + +class GroupKFold(GroupsConsumerMixin, _BaseKFold): + """K-fold iterator variant with non-overlapping groups. + + Each group will appear exactly once in the test set across all folds (the + number of distinct groups has to be at least equal to the number of folds). + + The folds are approximately balanced in the sense that the number of + distinct groups is approximately the same in each fold. + + Read more in the :ref:`User Guide `. + + For visualisation of cross-validation behaviour and + comparison between common scikit-learn split methods + refer to :ref:`sphx_glr_auto_examples_model_selection_plot_cv_indices.py` + + Parameters + ---------- + n_splits : int, default=5 + Number of folds. Must be at least 2. + + .. versionchanged:: 0.22 + ``n_splits`` default value changed from 3 to 5. + + Notes + ----- + Groups appear in an arbitrary order throughout the folds. 
+ + Examples + -------- + >>> import numpy as np + >>> from sklearn.model_selection import GroupKFold + >>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12]]) + >>> y = np.array([1, 2, 3, 4, 5, 6]) + >>> groups = np.array([0, 0, 2, 2, 3, 3]) + >>> group_kfold = GroupKFold(n_splits=2) + >>> group_kfold.get_n_splits(X, y, groups) + 2 + >>> print(group_kfold) + GroupKFold(n_splits=2) + >>> for i, (train_index, test_index) in enumerate(group_kfold.split(X, y, groups)): + ... print(f"Fold {i}:") + ... print(f" Train: index={train_index}, group={groups[train_index]}") + ... print(f" Test: index={test_index}, group={groups[test_index]}") + Fold 0: + Train: index=[2 3], group=[2 2] + Test: index=[0 1 4 5], group=[0 0 3 3] + Fold 1: + Train: index=[0 1 4 5], group=[0 0 3 3] + Test: index=[2 3], group=[2 2] + + See Also + -------- + LeaveOneGroupOut : For splitting the data according to explicit + domain-specific stratification of the dataset. + + StratifiedKFold : Takes class information into account to avoid building + folds with imbalanced class proportions (for binary or multiclass + classification tasks). + """ + + def __init__(self, n_splits=5): + super().__init__(n_splits, shuffle=False, random_state=None) + + def _iter_test_indices(self, X, y, groups): + if groups is None: + raise ValueError("The 'groups' parameter should not be None.") + groups = check_array(groups, input_name="groups", ensure_2d=False, dtype=None) + + unique_groups, groups = np.unique(groups, return_inverse=True) + n_groups = len(unique_groups) + + if self.n_splits > n_groups: + raise ValueError( + "Cannot have number of splits n_splits=%d greater" + " than the number of groups: %d." % (self.n_splits, n_groups) + ) + + # Weight groups by their number of occurrences + n_samples_per_group = np.bincount(groups) + + # Distribute the most frequent groups first + indices = np.argsort(n_samples_per_group)[::-1] + n_samples_per_group = n_samples_per_group[indices] + + # Total weight of each fold + n_samples_per_fold = np.zeros(self.n_splits) + + # Mapping from group index to fold index + group_to_fold = np.zeros(len(unique_groups)) + + # Distribute samples by adding the largest weight to the lightest fold + for group_index, weight in enumerate(n_samples_per_group): + lightest_fold = np.argmin(n_samples_per_fold) + n_samples_per_fold[lightest_fold] += weight + group_to_fold[indices[group_index]] = lightest_fold + + indices = group_to_fold[groups] + + for f in range(self.n_splits): + yield np.where(indices == f)[0] + + def split(self, X, y=None, groups=None): + """Generate indices to split data into training and test set. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : array-like of shape (n_samples,), default=None + The target variable for supervised learning problems. + + groups : array-like of shape (n_samples,) + Group labels for the samples used while splitting the dataset into + train/test set. + + Yields + ------ + train : ndarray + The training set indices for that split. + + test : ndarray + The testing set indices for that split. + """ + return super().split(X, y, groups) + + +class StratifiedKFold(_BaseKFold): + """Stratified K-Fold cross-validator. + + Provides train/test indices to split data in train/test sets. + + This cross-validation object is a variation of KFold that returns + stratified folds. 
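The greedy balancing done in `GroupKFold._iter_test_indices` above (the largest remaining group goes to the currently lightest fold) can be sketched in isolation; the group sizes below are made up for illustration:

import numpy as np

n_samples_per_group = np.array([5, 3, 3, 2, 1])  # hypothetical group sizes
n_splits = 2
n_samples_per_fold = np.zeros(n_splits)
group_to_fold = np.empty(len(n_samples_per_group), dtype=int)
for group in np.argsort(n_samples_per_group)[::-1]:  # heaviest group first
    lightest_fold = np.argmin(n_samples_per_fold)
    n_samples_per_fold[lightest_fold] += n_samples_per_group[group]
    group_to_fold[group] = lightest_fold
print(group_to_fold, n_samples_per_fold)  # [0 1 1 0 1] [7. 7.]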
The folds are made by preserving the percentage of + samples for each class. + + Read more in the :ref:`User Guide `. + + For visualisation of cross-validation behaviour and + comparison between common scikit-learn split methods + refer to :ref:`sphx_glr_auto_examples_model_selection_plot_cv_indices.py` + + Parameters + ---------- + n_splits : int, default=5 + Number of folds. Must be at least 2. + + .. versionchanged:: 0.22 + ``n_splits`` default value changed from 3 to 5. + + shuffle : bool, default=False + Whether to shuffle each class's samples before splitting into batches. + Note that the samples within each split will not be shuffled. + + random_state : int, RandomState instance or None, default=None + When `shuffle` is True, `random_state` affects the ordering of the + indices, which controls the randomness of each fold for each class. + Otherwise, leave `random_state` as `None`. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.model_selection import StratifiedKFold + >>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]]) + >>> y = np.array([0, 0, 1, 1]) + >>> skf = StratifiedKFold(n_splits=2) + >>> skf.get_n_splits(X, y) + 2 + >>> print(skf) + StratifiedKFold(n_splits=2, random_state=None, shuffle=False) + >>> for i, (train_index, test_index) in enumerate(skf.split(X, y)): + ... print(f"Fold {i}:") + ... print(f" Train: index={train_index}") + ... print(f" Test: index={test_index}") + Fold 0: + Train: index=[1 3] + Test: index=[0 2] + Fold 1: + Train: index=[0 2] + Test: index=[1 3] + + Notes + ----- + The implementation is designed to: + + * Generate test sets such that all contain the same distribution of + classes, or as close as possible. + * Be invariant to class label: relabelling ``y = ["Happy", "Sad"]`` to + ``y = [1, 0]`` should not change the indices generated. + * Preserve order dependencies in the dataset ordering, when + ``shuffle=False``: all samples from class k in some test set were + contiguous in y, or separated in y by samples from classes other than k. + * Generate test sets where the smallest and largest differ by at most one + sample. + + .. versionchanged:: 0.22 + The previous implementation did not follow the last constraint. + + See Also + -------- + RepeatedStratifiedKFold : Repeats Stratified K-Fold n times. + """ + + def __init__(self, n_splits=5, *, shuffle=False, random_state=None): + super().__init__(n_splits=n_splits, shuffle=shuffle, random_state=random_state) + + def _make_test_folds(self, X, y=None): + rng = check_random_state(self.random_state) + y = np.asarray(y) + type_of_target_y = type_of_target(y) + allowed_target_types = ("binary", "multiclass") + if type_of_target_y not in allowed_target_types: + raise ValueError( + "Supported target types are: {}. Got {!r} instead.".format( + allowed_target_types, type_of_target_y + ) + ) + + y = column_or_1d(y) + + _, y_idx, y_inv = np.unique(y, return_index=True, return_inverse=True) + # y_inv encodes y according to lexicographic order. We invert y_idx to + # map the classes so that they are encoded by order of appearance: + # 0 represents the first label appearing in y, 1 the second, etc. 
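The re-encoding described in the comment above can be checked on its own; a minimal sketch with a made-up `y`, showing that classes end up numbered by order of first appearance rather than lexicographically:

import numpy as np

y = np.array(["b", "b", "a", "c"])
_, y_idx, y_inv = np.unique(y, return_index=True, return_inverse=True)
_, class_perm = np.unique(y_idx, return_inverse=True)
y_encoded = class_perm[y_inv]
print(y_encoded)  # [0 0 1 2] -- "b" appears first so it becomes 0, then "a", then "c"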
+ _, class_perm = np.unique(y_idx, return_inverse=True) + y_encoded = class_perm[y_inv] + + n_classes = len(y_idx) + y_counts = np.bincount(y_encoded) + min_groups = np.min(y_counts) + if np.all(self.n_splits > y_counts): + raise ValueError( + "n_splits=%d cannot be greater than the" + " number of members in each class." % (self.n_splits) + ) + if self.n_splits > min_groups: + warnings.warn( + "The least populated class in y has only %d" + " members, which is less than n_splits=%d." + % (min_groups, self.n_splits), + UserWarning, + ) + + # Determine the optimal number of samples from each class in each fold, + # using round robin over the sorted y. (This can be done direct from + # counts, but that code is unreadable.) + y_order = np.sort(y_encoded) + allocation = np.asarray( + [ + np.bincount(y_order[i :: self.n_splits], minlength=n_classes) + for i in range(self.n_splits) + ] + ) + + # To maintain the data order dependencies as best as possible within + # the stratification constraint, we assign samples from each class in + # blocks (and then mess that up when shuffle=True). + test_folds = np.empty(len(y), dtype="i") + for k in range(n_classes): + # since the kth column of allocation stores the number of samples + # of class k in each test set, this generates blocks of fold + # indices corresponding to the allocation for class k. + folds_for_class = np.arange(self.n_splits).repeat(allocation[:, k]) + if self.shuffle: + rng.shuffle(folds_for_class) + test_folds[y_encoded == k] = folds_for_class + return test_folds + + def _iter_test_masks(self, X, y=None, groups=None): + test_folds = self._make_test_folds(X, y) + for i in range(self.n_splits): + yield test_folds == i + + def split(self, X, y, groups=None): + """Generate indices to split data into training and test set. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples + and `n_features` is the number of features. + + Note that providing ``y`` is sufficient to generate the splits and + hence ``np.zeros(n_samples)`` may be used as a placeholder for + ``X`` instead of actual training data. + + y : array-like of shape (n_samples,) + The target variable for supervised learning problems. + Stratification is done based on the y labels. + + groups : object + Always ignored, exists for compatibility. + + Yields + ------ + train : ndarray + The training set indices for that split. + + test : ndarray + The testing set indices for that split. + + Notes + ----- + Randomized CV splitters may return different results for each call of + split. You can make the results identical by setting `random_state` + to an integer. + """ + y = check_array(y, input_name="y", ensure_2d=False, dtype=None) + return super().split(X, y, groups) + + +class StratifiedGroupKFold(GroupsConsumerMixin, _BaseKFold): + """Stratified K-Fold iterator variant with non-overlapping groups. + + This cross-validation object is a variation of StratifiedKFold attempts to + return stratified folds with non-overlapping groups. The folds are made by + preserving the percentage of samples for each class. + + Each group will appear exactly once in the test set across all folds (the + number of distinct groups has to be at least equal to the number of folds). 
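For intuition on the round-robin `allocation` computed in `_make_test_folds` above, a standalone sketch with a made-up class layout (10 samples of class 0, 5 of class 1, 3 splits):

import numpy as np

n_splits, n_classes = 3, 2
y_encoded = np.array([0] * 10 + [1] * 5)  # made-up encoded labels
y_order = np.sort(y_encoded)
allocation = np.asarray(
    [np.bincount(y_order[i :: n_splits], minlength=n_classes) for i in range(n_splits)]
)
print(allocation)
# [[4 1]
#  [3 2]
#  [3 2]]  -- row i gives how many samples of each class land in test fold i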
+ + The difference between :class:`~sklearn.model_selection.GroupKFold` + and :class:`~sklearn.model_selection.StratifiedGroupKFold` is that + the former attempts to create balanced folds such that the number of + distinct groups is approximately the same in each fold, whereas + StratifiedGroupKFold attempts to create folds which preserve the + percentage of samples for each class as much as possible given the + constraint of non-overlapping groups between splits. + + Read more in the :ref:`User Guide `. + + For visualisation of cross-validation behaviour and + comparison between common scikit-learn split methods + refer to :ref:`sphx_glr_auto_examples_model_selection_plot_cv_indices.py` + + Parameters + ---------- + n_splits : int, default=5 + Number of folds. Must be at least 2. + + shuffle : bool, default=False + Whether to shuffle each class's samples before splitting into batches. + Note that the samples within each split will not be shuffled. + This implementation can only shuffle groups that have approximately the + same y distribution, no global shuffle will be performed. + + random_state : int or RandomState instance, default=None + When `shuffle` is True, `random_state` affects the ordering of the + indices, which controls the randomness of each fold for each class. + Otherwise, leave `random_state` as `None`. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.model_selection import StratifiedGroupKFold + >>> X = np.ones((17, 2)) + >>> y = np.array([0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0]) + >>> groups = np.array([1, 1, 2, 2, 3, 3, 3, 4, 5, 5, 5, 5, 6, 6, 7, 8, 8]) + >>> sgkf = StratifiedGroupKFold(n_splits=3) + >>> sgkf.get_n_splits(X, y) + 3 + >>> print(sgkf) + StratifiedGroupKFold(n_splits=3, random_state=None, shuffle=False) + >>> for i, (train_index, test_index) in enumerate(sgkf.split(X, y, groups)): + ... print(f"Fold {i}:") + ... print(f" Train: index={train_index}") + ... print(f" group={groups[train_index]}") + ... print(f" Test: index={test_index}") + ... print(f" group={groups[test_index]}") + Fold 0: + Train: index=[ 0 1 2 3 7 8 9 10 11 15 16] + group=[1 1 2 2 4 5 5 5 5 8 8] + Test: index=[ 4 5 6 12 13 14] + group=[3 3 3 6 6 7] + Fold 1: + Train: index=[ 4 5 6 7 8 9 10 11 12 13 14] + group=[3 3 3 4 5 5 5 5 6 6 7] + Test: index=[ 0 1 2 3 15 16] + group=[1 1 2 2 8 8] + Fold 2: + Train: index=[ 0 1 2 3 4 5 6 12 13 14 15 16] + group=[1 1 2 2 3 3 3 6 6 7 8 8] + Test: index=[ 7 8 9 10 11] + group=[4 5 5 5 5] + + Notes + ----- + The implementation is designed to: + + * Mimic the behavior of StratifiedKFold as much as possible for trivial + groups (e.g. when each group contains only one sample). + * Be invariant to class label: relabelling ``y = ["Happy", "Sad"]`` to + ``y = [1, 0]`` should not change the indices generated. + * Stratify based on samples as much as possible while keeping + non-overlapping groups constraint. That means that in some cases when + there is a small number of groups containing a large number of samples + the stratification will not be possible and the behavior will be close + to GroupKFold. + + See also + -------- + StratifiedKFold: Takes class information into account to build folds which + retain class distributions (for binary or multiclass classification + tasks). + + GroupKFold: K-fold iterator variant with non-overlapping groups. 
+ """ + + def __init__(self, n_splits=5, shuffle=False, random_state=None): + super().__init__(n_splits=n_splits, shuffle=shuffle, random_state=random_state) + + def _iter_test_indices(self, X, y, groups): + # Implementation is based on this kaggle kernel: + # https://www.kaggle.com/jakubwasikowski/stratified-group-k-fold-cross-validation + # and is a subject to Apache 2.0 License. You may obtain a copy of the + # License at http://www.apache.org/licenses/LICENSE-2.0 + # Changelist: + # - Refactored function to a class following scikit-learn KFold + # interface. + # - Added heuristic for assigning group to the least populated fold in + # cases when all other criteria are equal + # - Swtch from using python ``Counter`` to ``np.unique`` to get class + # distribution + # - Added scikit-learn checks for input: checking that target is binary + # or multiclass, checking passed random state, checking that number + # of splits is less than number of members in each class, checking + # that least populated class has more members than there are splits. + rng = check_random_state(self.random_state) + y = np.asarray(y) + type_of_target_y = type_of_target(y) + allowed_target_types = ("binary", "multiclass") + if type_of_target_y not in allowed_target_types: + raise ValueError( + "Supported target types are: {}. Got {!r} instead.".format( + allowed_target_types, type_of_target_y + ) + ) + + y = column_or_1d(y) + _, y_inv, y_cnt = np.unique(y, return_inverse=True, return_counts=True) + if np.all(self.n_splits > y_cnt): + raise ValueError( + "n_splits=%d cannot be greater than the" + " number of members in each class." % (self.n_splits) + ) + n_smallest_class = np.min(y_cnt) + if self.n_splits > n_smallest_class: + warnings.warn( + "The least populated class in y has only %d" + " members, which is less than n_splits=%d." 
+ % (n_smallest_class, self.n_splits), + UserWarning, + ) + n_classes = len(y_cnt) + + _, groups_inv, groups_cnt = np.unique( + groups, return_inverse=True, return_counts=True + ) + y_counts_per_group = np.zeros((len(groups_cnt), n_classes)) + for class_idx, group_idx in zip(y_inv, groups_inv): + y_counts_per_group[group_idx, class_idx] += 1 + + y_counts_per_fold = np.zeros((self.n_splits, n_classes)) + groups_per_fold = defaultdict(set) + + if self.shuffle: + rng.shuffle(y_counts_per_group) + + # Stable sort to keep shuffled order for groups with the same + # class distribution variance + sorted_groups_idx = np.argsort( + -np.std(y_counts_per_group, axis=1), kind="mergesort" + ) + + for group_idx in sorted_groups_idx: + group_y_counts = y_counts_per_group[group_idx] + best_fold = self._find_best_fold( + y_counts_per_fold=y_counts_per_fold, + y_cnt=y_cnt, + group_y_counts=group_y_counts, + ) + y_counts_per_fold[best_fold] += group_y_counts + groups_per_fold[best_fold].add(group_idx) + + for i in range(self.n_splits): + test_indices = [ + idx + for idx, group_idx in enumerate(groups_inv) + if group_idx in groups_per_fold[i] + ] + yield test_indices + + def _find_best_fold(self, y_counts_per_fold, y_cnt, group_y_counts): + best_fold = None + min_eval = np.inf + min_samples_in_fold = np.inf + for i in range(self.n_splits): + y_counts_per_fold[i] += group_y_counts + # Summarise the distribution over classes in each proposed fold + std_per_class = np.std(y_counts_per_fold / y_cnt.reshape(1, -1), axis=0) + y_counts_per_fold[i] -= group_y_counts + fold_eval = np.mean(std_per_class) + samples_in_fold = np.sum(y_counts_per_fold[i]) + is_current_fold_better = ( + fold_eval < min_eval + or np.isclose(fold_eval, min_eval) + and samples_in_fold < min_samples_in_fold + ) + if is_current_fold_better: + min_eval = fold_eval + min_samples_in_fold = samples_in_fold + best_fold = i + return best_fold + + +class TimeSeriesSplit(_BaseKFold): + """Time Series cross-validator. + + Provides train/test indices to split time series data samples + that are observed at fixed time intervals, in train/test sets. + In each split, test indices must be higher than before, and thus shuffling + in cross validator is inappropriate. + + This cross-validation object is a variation of :class:`KFold`. + In the kth split, it returns first k folds as train set and the + (k+1)th fold as test set. + + Note that unlike standard cross-validation methods, successive + training sets are supersets of those that come before them. + + Read more in the :ref:`User Guide `. + + For visualisation of cross-validation behaviour and + comparison between common scikit-learn split methods + refer to :ref:`sphx_glr_auto_examples_model_selection_plot_cv_indices.py` + + .. versionadded:: 0.18 + + Parameters + ---------- + n_splits : int, default=5 + Number of splits. Must be at least 2. + + .. versionchanged:: 0.22 + ``n_splits`` default value changed from 3 to 5. + + max_train_size : int, default=None + Maximum size for a single training set. + + test_size : int, default=None + Used to limit the size of the test set. Defaults to + ``n_samples // (n_splits + 1)``, which is the maximum allowed value + with ``gap=0``. + + .. versionadded:: 0.24 + + gap : int, default=0 + Number of samples to exclude from the end of each train set before + the test set. + + .. 
versionadded:: 0.24 + + Examples + -------- + >>> import numpy as np + >>> from sklearn.model_selection import TimeSeriesSplit + >>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4], [1, 2], [3, 4]]) + >>> y = np.array([1, 2, 3, 4, 5, 6]) + >>> tscv = TimeSeriesSplit() + >>> print(tscv) + TimeSeriesSplit(gap=0, max_train_size=None, n_splits=5, test_size=None) + >>> for i, (train_index, test_index) in enumerate(tscv.split(X)): + ... print(f"Fold {i}:") + ... print(f" Train: index={train_index}") + ... print(f" Test: index={test_index}") + Fold 0: + Train: index=[0] + Test: index=[1] + Fold 1: + Train: index=[0 1] + Test: index=[2] + Fold 2: + Train: index=[0 1 2] + Test: index=[3] + Fold 3: + Train: index=[0 1 2 3] + Test: index=[4] + Fold 4: + Train: index=[0 1 2 3 4] + Test: index=[5] + >>> # Fix test_size to 2 with 12 samples + >>> X = np.random.randn(12, 2) + >>> y = np.random.randint(0, 2, 12) + >>> tscv = TimeSeriesSplit(n_splits=3, test_size=2) + >>> for i, (train_index, test_index) in enumerate(tscv.split(X)): + ... print(f"Fold {i}:") + ... print(f" Train: index={train_index}") + ... print(f" Test: index={test_index}") + Fold 0: + Train: index=[0 1 2 3 4 5] + Test: index=[6 7] + Fold 1: + Train: index=[0 1 2 3 4 5 6 7] + Test: index=[8 9] + Fold 2: + Train: index=[0 1 2 3 4 5 6 7 8 9] + Test: index=[10 11] + >>> # Add in a 2 period gap + >>> tscv = TimeSeriesSplit(n_splits=3, test_size=2, gap=2) + >>> for i, (train_index, test_index) in enumerate(tscv.split(X)): + ... print(f"Fold {i}:") + ... print(f" Train: index={train_index}") + ... print(f" Test: index={test_index}") + Fold 0: + Train: index=[0 1 2 3] + Test: index=[6 7] + Fold 1: + Train: index=[0 1 2 3 4 5] + Test: index=[8 9] + Fold 2: + Train: index=[0 1 2 3 4 5 6 7] + Test: index=[10 11] + + For a more extended example see + :ref:`sphx_glr_auto_examples_applications_plot_cyclical_feature_engineering.py`. + + Notes + ----- + The training set has size ``i * n_samples // (n_splits + 1) + + n_samples % (n_splits + 1)`` in the ``i`` th split, + with a test set of size ``n_samples//(n_splits + 1)`` by default, + where ``n_samples`` is the number of samples. + """ + + def __init__(self, n_splits=5, *, max_train_size=None, test_size=None, gap=0): + super().__init__(n_splits, shuffle=False, random_state=None) + self.max_train_size = max_train_size + self.test_size = test_size + self.gap = gap + + def split(self, X, y=None, groups=None): + """Generate indices to split data into training and test set. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : array-like of shape (n_samples,) + Always ignored, exists for compatibility. + + groups : array-like of shape (n_samples,) + Always ignored, exists for compatibility. + + Yields + ------ + train : ndarray + The training set indices for that split. + + test : ndarray + The testing set indices for that split. + """ + X, y, groups = indexable(X, y, groups) + n_samples = _num_samples(X) + n_splits = self.n_splits + n_folds = n_splits + 1 + gap = self.gap + test_size = ( + self.test_size if self.test_size is not None else n_samples // n_folds + ) + + # Make sure we have enough samples for the given split parameters + if n_folds > n_samples: + raise ValueError( + f"Cannot have number of folds={n_folds} greater" + f" than the number of samples={n_samples}." 
+ ) + if n_samples - gap - (test_size * n_splits) <= 0: + raise ValueError( + f"Too many splits={n_splits} for number of samples" + f"={n_samples} with test_size={test_size} and gap={gap}." + ) + + indices = np.arange(n_samples) + test_starts = range(n_samples - n_splits * test_size, n_samples, test_size) + + for test_start in test_starts: + train_end = test_start - gap + if self.max_train_size and self.max_train_size < train_end: + yield ( + indices[train_end - self.max_train_size : train_end], + indices[test_start : test_start + test_size], + ) + else: + yield ( + indices[:train_end], + indices[test_start : test_start + test_size], + ) + + +class LeaveOneGroupOut(GroupsConsumerMixin, BaseCrossValidator): + """Leave One Group Out cross-validator. + + Provides train/test indices to split data such that each training set is + comprised of all samples except ones belonging to one specific group. + Arbitrary domain specific group information is provided an array integers + that encodes the group of each sample. + + For instance the groups could be the year of collection of the samples + and thus allow for cross-validation against time-based splits. + + Read more in the :ref:`User Guide `. + + Notes + ----- + Splits are ordered according to the index of the group left out. The first + split has testing set consisting of the group whose index in `groups` is + lowest, and so on. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.model_selection import LeaveOneGroupOut + >>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]]) + >>> y = np.array([1, 2, 1, 2]) + >>> groups = np.array([1, 1, 2, 2]) + >>> logo = LeaveOneGroupOut() + >>> logo.get_n_splits(X, y, groups) + 2 + >>> logo.get_n_splits(groups=groups) # 'groups' is always required + 2 + >>> print(logo) + LeaveOneGroupOut() + >>> for i, (train_index, test_index) in enumerate(logo.split(X, y, groups)): + ... print(f"Fold {i}:") + ... print(f" Train: index={train_index}, group={groups[train_index]}") + ... print(f" Test: index={test_index}, group={groups[test_index]}") + Fold 0: + Train: index=[2 3], group=[2 2] + Test: index=[0 1], group=[1 1] + Fold 1: + Train: index=[0 1], group=[1 1] + Test: index=[2 3], group=[2 2] + + See also + -------- + GroupKFold: K-fold iterator variant with non-overlapping groups. + """ + + def _iter_test_masks(self, X, y, groups): + if groups is None: + raise ValueError("The 'groups' parameter should not be None.") + # We make a copy of groups to avoid side-effects during iteration + groups = check_array( + groups, input_name="groups", copy=True, ensure_2d=False, dtype=None + ) + unique_groups = np.unique(groups) + if len(unique_groups) <= 1: + raise ValueError( + "The groups parameter contains fewer than 2 unique groups " + "(%s). LeaveOneGroupOut expects at least 2." % unique_groups + ) + for i in unique_groups: + yield groups == i + + def get_n_splits(self, X=None, y=None, groups=None): + """Returns the number of splitting iterations in the cross-validator. + + Parameters + ---------- + X : object + Always ignored, exists for compatibility. + + y : object + Always ignored, exists for compatibility. + + groups : array-like of shape (n_samples,) + Group labels for the samples used while splitting the dataset into + train/test set. This 'groups' parameter must always be specified to + calculate the number of splits, though the other parameters can be + omitted. + + Returns + ------- + n_splits : int + Returns the number of splitting iterations in the cross-validator. 
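The `max_train_size` branch of `TimeSeriesSplit.split` shown above produces a rolling rather than an expanding training window; a small standalone sketch with made-up data:

import numpy as np
from sklearn.model_selection import TimeSeriesSplit

X = np.arange(6).reshape(-1, 1)  # 6 made-up time-ordered samples
tscv = TimeSeriesSplit(n_splits=3, max_train_size=3)
for train_index, test_index in tscv.split(X):
    print(train_index, test_index)
# [0 1 2] [3]
# [1 2 3] [4]
# [2 3 4] [5]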
+ """ + if groups is None: + raise ValueError("The 'groups' parameter should not be None.") + groups = check_array(groups, input_name="groups", ensure_2d=False, dtype=None) + return len(np.unique(groups)) + + def split(self, X, y=None, groups=None): + """Generate indices to split data into training and test set. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : array-like of shape (n_samples,), default=None + The target variable for supervised learning problems. + + groups : array-like of shape (n_samples,) + Group labels for the samples used while splitting the dataset into + train/test set. + + Yields + ------ + train : ndarray + The training set indices for that split. + + test : ndarray + The testing set indices for that split. + """ + return super().split(X, y, groups) + + +class LeavePGroupsOut(GroupsConsumerMixin, BaseCrossValidator): + """Leave P Group(s) Out cross-validator. + + Provides train/test indices to split data according to a third-party + provided group. This group information can be used to encode arbitrary + domain specific stratifications of the samples as integers. + + For instance the groups could be the year of collection of the samples + and thus allow for cross-validation against time-based splits. + + The difference between LeavePGroupsOut and LeaveOneGroupOut is that + the former builds the test sets with all the samples assigned to + ``p`` different values of the groups while the latter uses samples + all assigned the same groups. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_groups : int + Number of groups (``p``) to leave out in the test split. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.model_selection import LeavePGroupsOut + >>> X = np.array([[1, 2], [3, 4], [5, 6]]) + >>> y = np.array([1, 2, 1]) + >>> groups = np.array([1, 2, 3]) + >>> lpgo = LeavePGroupsOut(n_groups=2) + >>> lpgo.get_n_splits(X, y, groups) + 3 + >>> lpgo.get_n_splits(groups=groups) # 'groups' is always required + 3 + >>> print(lpgo) + LeavePGroupsOut(n_groups=2) + >>> for i, (train_index, test_index) in enumerate(lpgo.split(X, y, groups)): + ... print(f"Fold {i}:") + ... print(f" Train: index={train_index}, group={groups[train_index]}") + ... print(f" Test: index={test_index}, group={groups[test_index]}") + Fold 0: + Train: index=[2], group=[3] + Test: index=[0 1], group=[1 2] + Fold 1: + Train: index=[1], group=[2] + Test: index=[0 2], group=[1 3] + Fold 2: + Train: index=[0], group=[1] + Test: index=[1 2], group=[2 3] + + See Also + -------- + GroupKFold : K-fold iterator variant with non-overlapping groups. + """ + + def __init__(self, n_groups): + self.n_groups = n_groups + + def _iter_test_masks(self, X, y, groups): + if groups is None: + raise ValueError("The 'groups' parameter should not be None.") + groups = check_array( + groups, input_name="groups", copy=True, ensure_2d=False, dtype=None + ) + unique_groups = np.unique(groups) + if self.n_groups >= len(unique_groups): + raise ValueError( + "The groups parameter contains fewer than (or equal to) " + "n_groups (%d) numbers of unique groups (%s). 
LeavePGroupsOut " + "expects that at least n_groups + 1 (%d) unique groups be " + "present" % (self.n_groups, unique_groups, self.n_groups + 1) + ) + combi = combinations(range(len(unique_groups)), self.n_groups) + for indices in combi: + test_index = np.zeros(_num_samples(X), dtype=bool) + for l in unique_groups[np.array(indices)]: + test_index[groups == l] = True + yield test_index + + def get_n_splits(self, X=None, y=None, groups=None): + """Returns the number of splitting iterations in the cross-validator. + + Parameters + ---------- + X : object + Always ignored, exists for compatibility. + + y : object + Always ignored, exists for compatibility. + + groups : array-like of shape (n_samples,) + Group labels for the samples used while splitting the dataset into + train/test set. This 'groups' parameter must always be specified to + calculate the number of splits, though the other parameters can be + omitted. + + Returns + ------- + n_splits : int + Returns the number of splitting iterations in the cross-validator. + """ + if groups is None: + raise ValueError("The 'groups' parameter should not be None.") + groups = check_array(groups, input_name="groups", ensure_2d=False, dtype=None) + return int(comb(len(np.unique(groups)), self.n_groups, exact=True)) + + def split(self, X, y=None, groups=None): + """Generate indices to split data into training and test set. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : array-like of shape (n_samples,), default=None + The target variable for supervised learning problems. + + groups : array-like of shape (n_samples,) + Group labels for the samples used while splitting the dataset into + train/test set. + + Yields + ------ + train : ndarray + The training set indices for that split. + + test : ndarray + The testing set indices for that split. + """ + return super().split(X, y, groups) + + +class _RepeatedSplits(_MetadataRequester, metaclass=ABCMeta): + """Repeated splits for an arbitrary randomized CV splitter. + + Repeats splits for cross-validators n times with different randomization + in each repetition. + + Parameters + ---------- + cv : callable + Cross-validator class. + + n_repeats : int, default=10 + Number of times cross-validator needs to be repeated. + + random_state : int, RandomState instance or None, default=None + Passes `random_state` to the arbitrary repeating cross validator. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + **cvargs : additional params + Constructor parameters for cv. Must not contain random_state + and shuffle. + """ + + # This indicates that by default CV splitters don't have a "groups" kwarg, + # unless indicated by inheriting from ``GroupsConsumerMixin``. + # This also prevents ``set_split_request`` to be generated for splitters + # which don't support ``groups``. 
+ __metadata_request__split = {"groups": metadata_routing.UNUSED} + + def __init__(self, cv, *, n_repeats=10, random_state=None, **cvargs): + if not isinstance(n_repeats, numbers.Integral): + raise ValueError("Number of repetitions must be of Integral type.") + + if n_repeats <= 0: + raise ValueError("Number of repetitions must be greater than 0.") + + if any(key in cvargs for key in ("random_state", "shuffle")): + raise ValueError("cvargs must not contain random_state or shuffle.") + + self.cv = cv + self.n_repeats = n_repeats + self.random_state = random_state + self.cvargs = cvargs + + def split(self, X, y=None, groups=None): + """Generates indices to split data into training and test set. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : array-like of shape (n_samples,) + The target variable for supervised learning problems. + + groups : array-like of shape (n_samples,), default=None + Group labels for the samples used while splitting the dataset into + train/test set. + + Yields + ------ + train : ndarray + The training set indices for that split. + + test : ndarray + The testing set indices for that split. + """ + n_repeats = self.n_repeats + rng = check_random_state(self.random_state) + + for idx in range(n_repeats): + cv = self.cv(random_state=rng, shuffle=True, **self.cvargs) + for train_index, test_index in cv.split(X, y, groups): + yield train_index, test_index + + def get_n_splits(self, X=None, y=None, groups=None): + """Returns the number of splitting iterations in the cross-validator. + + Parameters + ---------- + X : object + Always ignored, exists for compatibility. + ``np.zeros(n_samples)`` may be used as a placeholder. + + y : object + Always ignored, exists for compatibility. + ``np.zeros(n_samples)`` may be used as a placeholder. + + groups : array-like of shape (n_samples,), default=None + Group labels for the samples used while splitting the dataset into + train/test set. + + Returns + ------- + n_splits : int + Returns the number of splitting iterations in the cross-validator. + """ + rng = check_random_state(self.random_state) + cv = self.cv(random_state=rng, shuffle=True, **self.cvargs) + return cv.get_n_splits(X, y, groups) * self.n_repeats + + def __repr__(self): + return _build_repr(self) + + +class RepeatedKFold(_RepeatedSplits): + """Repeated K-Fold cross validator. + + Repeats K-Fold n times with different randomization in each repetition. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_splits : int, default=5 + Number of folds. Must be at least 2. + + n_repeats : int, default=10 + Number of times cross-validator needs to be repeated. + + random_state : int, RandomState instance or None, default=None + Controls the randomness of each repeated cross-validation instance. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.model_selection import RepeatedKFold + >>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]]) + >>> y = np.array([0, 0, 1, 1]) + >>> rkf = RepeatedKFold(n_splits=2, n_repeats=2, random_state=2652124) + >>> rkf.get_n_splits(X, y) + 4 + >>> print(rkf) + RepeatedKFold(n_repeats=2, n_splits=2, random_state=2652124) + >>> for i, (train_index, test_index) in enumerate(rkf.split(X)): + ... print(f"Fold {i}:") + ... print(f" Train: index={train_index}") + ... 
print(f" Test: index={test_index}") + ... + Fold 0: + Train: index=[0 1] + Test: index=[2 3] + Fold 1: + Train: index=[2 3] + Test: index=[0 1] + Fold 2: + Train: index=[1 2] + Test: index=[0 3] + Fold 3: + Train: index=[0 3] + Test: index=[1 2] + + Notes + ----- + Randomized CV splitters may return different results for each call of + split. You can make the results identical by setting `random_state` + to an integer. + + See Also + -------- + RepeatedStratifiedKFold : Repeats Stratified K-Fold n times. + """ + + def __init__(self, *, n_splits=5, n_repeats=10, random_state=None): + super().__init__( + KFold, n_repeats=n_repeats, random_state=random_state, n_splits=n_splits + ) + + +class RepeatedStratifiedKFold(_RepeatedSplits): + """Repeated Stratified K-Fold cross validator. + + Repeats Stratified K-Fold n times with different randomization in each + repetition. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_splits : int, default=5 + Number of folds. Must be at least 2. + + n_repeats : int, default=10 + Number of times cross-validator needs to be repeated. + + random_state : int, RandomState instance or None, default=None + Controls the generation of the random states for each repetition. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.model_selection import RepeatedStratifiedKFold + >>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]]) + >>> y = np.array([0, 0, 1, 1]) + >>> rskf = RepeatedStratifiedKFold(n_splits=2, n_repeats=2, + ... random_state=36851234) + >>> rskf.get_n_splits(X, y) + 4 + >>> print(rskf) + RepeatedStratifiedKFold(n_repeats=2, n_splits=2, random_state=36851234) + >>> for i, (train_index, test_index) in enumerate(rskf.split(X, y)): + ... print(f"Fold {i}:") + ... print(f" Train: index={train_index}") + ... print(f" Test: index={test_index}") + ... + Fold 0: + Train: index=[1 2] + Test: index=[0 3] + Fold 1: + Train: index=[0 3] + Test: index=[1 2] + Fold 2: + Train: index=[1 3] + Test: index=[0 2] + Fold 3: + Train: index=[0 2] + Test: index=[1 3] + + Notes + ----- + Randomized CV splitters may return different results for each call of + split. You can make the results identical by setting `random_state` + to an integer. + + See Also + -------- + RepeatedKFold : Repeats K-Fold n times. + """ + + def __init__(self, *, n_splits=5, n_repeats=10, random_state=None): + super().__init__( + StratifiedKFold, + n_repeats=n_repeats, + random_state=random_state, + n_splits=n_splits, + ) + + +class BaseShuffleSplit(_MetadataRequester, metaclass=ABCMeta): + """Base class for ShuffleSplit and StratifiedShuffleSplit.""" + + # This indicates that by default CV splitters don't have a "groups" kwarg, + # unless indicated by inheriting from ``GroupsConsumerMixin``. + # This also prevents ``set_split_request`` to be generated for splitters + # which don't support ``groups``. + __metadata_request__split = {"groups": metadata_routing.UNUSED} + + def __init__( + self, n_splits=10, *, test_size=None, train_size=None, random_state=None + ): + self.n_splits = n_splits + self.test_size = test_size + self.train_size = train_size + self.random_state = random_state + self._default_test_size = 0.1 + + def split(self, X, y=None, groups=None): + """Generate indices to split data into training and test set. 
+ + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : array-like of shape (n_samples,) + The target variable for supervised learning problems. + + groups : array-like of shape (n_samples,), default=None + Group labels for the samples used while splitting the dataset into + train/test set. + + Yields + ------ + train : ndarray + The training set indices for that split. + + test : ndarray + The testing set indices for that split. + + Notes + ----- + Randomized CV splitters may return different results for each call of + split. You can make the results identical by setting `random_state` + to an integer. + """ + X, y, groups = indexable(X, y, groups) + for train, test in self._iter_indices(X, y, groups): + yield train, test + + @abstractmethod + def _iter_indices(self, X, y=None, groups=None): + """Generate (train, test) indices""" + + def get_n_splits(self, X=None, y=None, groups=None): + """Returns the number of splitting iterations in the cross-validator. + + Parameters + ---------- + X : object + Always ignored, exists for compatibility. + + y : object + Always ignored, exists for compatibility. + + groups : object + Always ignored, exists for compatibility. + + Returns + ------- + n_splits : int + Returns the number of splitting iterations in the cross-validator. + """ + return self.n_splits + + def __repr__(self): + return _build_repr(self) + + +class ShuffleSplit(BaseShuffleSplit): + """Random permutation cross-validator. + + Yields indices to split data into training and test sets. + + Note: contrary to other cross-validation strategies, random splits + do not guarantee that all folds will be different, although this is + still very likely for sizeable datasets. + + Read more in the :ref:`User Guide `. + + For visualisation of cross-validation behaviour and + comparison between common scikit-learn split methods + refer to :ref:`sphx_glr_auto_examples_model_selection_plot_cv_indices.py` + + Parameters + ---------- + n_splits : int, default=10 + Number of re-shuffling & splitting iterations. + + test_size : float or int, default=None + If float, should be between 0.0 and 1.0 and represent the proportion + of the dataset to include in the test split. If int, represents the + absolute number of test samples. If None, the value is set to the + complement of the train size. If ``train_size`` is also None, it will + be set to 0.1. + + train_size : float or int, default=None + If float, should be between 0.0 and 1.0 and represent the + proportion of the dataset to include in the train split. If + int, represents the absolute number of train samples. If None, + the value is automatically set to the complement of the test size. + + random_state : int, RandomState instance or None, default=None + Controls the randomness of the training and testing indices produced. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.model_selection import ShuffleSplit + >>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8], [3, 4], [5, 6]]) + >>> y = np.array([1, 2, 1, 2, 1, 2]) + >>> rs = ShuffleSplit(n_splits=5, test_size=.25, random_state=0) + >>> rs.get_n_splits(X) + 5 + >>> print(rs) + ShuffleSplit(n_splits=5, random_state=0, test_size=0.25, train_size=None) + >>> for i, (train_index, test_index) in enumerate(rs.split(X)): + ... print(f"Fold {i}:") + ... 
print(f" Train: index={train_index}") + ... print(f" Test: index={test_index}") + Fold 0: + Train: index=[1 3 0 4] + Test: index=[5 2] + Fold 1: + Train: index=[4 0 2 5] + Test: index=[1 3] + Fold 2: + Train: index=[1 2 4 0] + Test: index=[3 5] + Fold 3: + Train: index=[3 4 1 0] + Test: index=[5 2] + Fold 4: + Train: index=[3 5 1 0] + Test: index=[2 4] + >>> # Specify train and test size + >>> rs = ShuffleSplit(n_splits=5, train_size=0.5, test_size=.25, + ... random_state=0) + >>> for i, (train_index, test_index) in enumerate(rs.split(X)): + ... print(f"Fold {i}:") + ... print(f" Train: index={train_index}") + ... print(f" Test: index={test_index}") + Fold 0: + Train: index=[1 3 0] + Test: index=[5 2] + Fold 1: + Train: index=[4 0 2] + Test: index=[1 3] + Fold 2: + Train: index=[1 2 4] + Test: index=[3 5] + Fold 3: + Train: index=[3 4 1] + Test: index=[5 2] + Fold 4: + Train: index=[3 5 1] + Test: index=[2 4] + """ + + def __init__( + self, n_splits=10, *, test_size=None, train_size=None, random_state=None + ): + super().__init__( + n_splits=n_splits, + test_size=test_size, + train_size=train_size, + random_state=random_state, + ) + self._default_test_size = 0.1 + + def _iter_indices(self, X, y=None, groups=None): + n_samples = _num_samples(X) + n_train, n_test = _validate_shuffle_split( + n_samples, + self.test_size, + self.train_size, + default_test_size=self._default_test_size, + ) + + rng = check_random_state(self.random_state) + for i in range(self.n_splits): + # random partition + permutation = rng.permutation(n_samples) + ind_test = permutation[:n_test] + ind_train = permutation[n_test : (n_test + n_train)] + yield ind_train, ind_test + + +class GroupShuffleSplit(GroupsConsumerMixin, ShuffleSplit): + """Shuffle-Group(s)-Out cross-validation iterator. + + Provides randomized train/test indices to split data according to a + third-party provided group. This group information can be used to encode + arbitrary domain specific stratifications of the samples as integers. + + For instance the groups could be the year of collection of the samples + and thus allow for cross-validation against time-based splits. + + The difference between LeavePGroupsOut and GroupShuffleSplit is that + the former generates splits using all subsets of size ``p`` unique groups, + whereas GroupShuffleSplit generates a user-determined number of random + test splits, each with a user-determined fraction of unique groups. + + For example, a less computationally intensive alternative to + ``LeavePGroupsOut(p=10)`` would be + ``GroupShuffleSplit(test_size=10, n_splits=100)``. + + Note: The parameters ``test_size`` and ``train_size`` refer to groups, and + not to samples, as in ShuffleSplit. + + Read more in the :ref:`User Guide `. + + For visualisation of cross-validation behaviour and + comparison between common scikit-learn split methods + refer to :ref:`sphx_glr_auto_examples_model_selection_plot_cv_indices.py` + + Parameters + ---------- + n_splits : int, default=5 + Number of re-shuffling & splitting iterations. + + test_size : float, int, default=0.2 + If float, should be between 0.0 and 1.0 and represent the proportion + of groups to include in the test split (rounded up). If int, + represents the absolute number of test groups. If None, the value is + set to the complement of the train size. + The default will change in version 0.21. It will remain 0.2 only + if ``train_size`` is unspecified, otherwise it will complement + the specified ``train_size``. 
+ + train_size : float or int, default=None + If float, should be between 0.0 and 1.0 and represent the + proportion of the groups to include in the train split. If + int, represents the absolute number of train groups. If None, + the value is automatically set to the complement of the test size. + + random_state : int, RandomState instance or None, default=None + Controls the randomness of the training and testing indices produced. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.model_selection import GroupShuffleSplit + >>> X = np.ones(shape=(8, 2)) + >>> y = np.ones(shape=(8, 1)) + >>> groups = np.array([1, 1, 2, 2, 2, 3, 3, 3]) + >>> print(groups.shape) + (8,) + >>> gss = GroupShuffleSplit(n_splits=2, train_size=.7, random_state=42) + >>> gss.get_n_splits() + 2 + >>> print(gss) + GroupShuffleSplit(n_splits=2, random_state=42, test_size=None, train_size=0.7) + >>> for i, (train_index, test_index) in enumerate(gss.split(X, y, groups)): + ... print(f"Fold {i}:") + ... print(f" Train: index={train_index}, group={groups[train_index]}") + ... print(f" Test: index={test_index}, group={groups[test_index]}") + Fold 0: + Train: index=[2 3 4 5 6 7], group=[2 2 2 3 3 3] + Test: index=[0 1], group=[1 1] + Fold 1: + Train: index=[0 1 5 6 7], group=[1 1 3 3 3] + Test: index=[2 3 4], group=[2 2 2] + + See Also + -------- + ShuffleSplit : Shuffles samples to create independent test/train sets. + + LeavePGroupsOut : Train set leaves out all possible subsets of `p` groups. + """ + + def __init__( + self, n_splits=5, *, test_size=None, train_size=None, random_state=None + ): + super().__init__( + n_splits=n_splits, + test_size=test_size, + train_size=train_size, + random_state=random_state, + ) + self._default_test_size = 0.2 + + def _iter_indices(self, X, y, groups): + if groups is None: + raise ValueError("The 'groups' parameter should not be None.") + groups = check_array(groups, input_name="groups", ensure_2d=False, dtype=None) + classes, group_indices = np.unique(groups, return_inverse=True) + for group_train, group_test in super()._iter_indices(X=classes): + # these are the indices of classes in the partition + # invert them into data indices + + train = np.flatnonzero(np.isin(group_indices, group_train)) + test = np.flatnonzero(np.isin(group_indices, group_test)) + + yield train, test + + def split(self, X, y=None, groups=None): + """Generate indices to split data into training and test set. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : array-like of shape (n_samples,), default=None + The target variable for supervised learning problems. + + groups : array-like of shape (n_samples,) + Group labels for the samples used while splitting the dataset into + train/test set. + + Yields + ------ + train : ndarray + The training set indices for that split. + + test : ndarray + The testing set indices for that split. + + Notes + ----- + Randomized CV splitters may return different results for each call of + split. You can make the results identical by setting `random_state` + to an integer. + """ + return super().split(X, y, groups) + + +class StratifiedShuffleSplit(BaseShuffleSplit): + """Stratified ShuffleSplit cross-validator. + + Provides train/test indices to split data in train/test sets. 
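The group-to-sample index mapping used in `GroupShuffleSplit._iter_indices` above can be seen in isolation; a minimal sketch with made-up groups, where the partition is drawn over unique groups and then expanded back to sample indices:

import numpy as np

groups = np.array(["a", "a", "b", "b", "c"])  # made-up group labels
classes, group_indices = np.unique(groups, return_inverse=True)
group_train, group_test = np.array([0, 2]), np.array([1])  # indices into `classes`
train = np.flatnonzero(np.isin(group_indices, group_train))
test = np.flatnonzero(np.isin(group_indices, group_test))
print(train, test)  # [0 1 4] [2 3]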
+ + This cross-validation object is a merge of StratifiedKFold and + ShuffleSplit, which returns stratified randomized folds. The folds + are made by preserving the percentage of samples for each class. + + Note: like the ShuffleSplit strategy, stratified random splits + do not guarantee that all folds will be different, although this is + still very likely for sizeable datasets. + + Read more in the :ref:`User Guide `. + + For visualisation of cross-validation behaviour and + comparison between common scikit-learn split methods + refer to :ref:`sphx_glr_auto_examples_model_selection_plot_cv_indices.py` + + Parameters + ---------- + n_splits : int, default=10 + Number of re-shuffling & splitting iterations. + + test_size : float or int, default=None + If float, should be between 0.0 and 1.0 and represent the proportion + of the dataset to include in the test split. If int, represents the + absolute number of test samples. If None, the value is set to the + complement of the train size. If ``train_size`` is also None, it will + be set to 0.1. + + train_size : float or int, default=None + If float, should be between 0.0 and 1.0 and represent the + proportion of the dataset to include in the train split. If + int, represents the absolute number of train samples. If None, + the value is automatically set to the complement of the test size. + + random_state : int, RandomState instance or None, default=None + Controls the randomness of the training and testing indices produced. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.model_selection import StratifiedShuffleSplit + >>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4], [1, 2], [3, 4]]) + >>> y = np.array([0, 0, 0, 1, 1, 1]) + >>> sss = StratifiedShuffleSplit(n_splits=5, test_size=0.5, random_state=0) + >>> sss.get_n_splits(X, y) + 5 + >>> print(sss) + StratifiedShuffleSplit(n_splits=5, random_state=0, ...) + >>> for i, (train_index, test_index) in enumerate(sss.split(X, y)): + ... print(f"Fold {i}:") + ... print(f" Train: index={train_index}") + ... print(f" Test: index={test_index}") + Fold 0: + Train: index=[5 2 3] + Test: index=[4 1 0] + Fold 1: + Train: index=[5 1 4] + Test: index=[0 2 3] + Fold 2: + Train: index=[5 0 2] + Test: index=[4 3 1] + Fold 3: + Train: index=[4 1 0] + Test: index=[2 3 5] + Fold 4: + Train: index=[0 5 1] + Test: index=[3 4 2] + """ + + def __init__( + self, n_splits=10, *, test_size=None, train_size=None, random_state=None + ): + super().__init__( + n_splits=n_splits, + test_size=test_size, + train_size=train_size, + random_state=random_state, + ) + self._default_test_size = 0.1 + + def _iter_indices(self, X, y, groups=None): + n_samples = _num_samples(X) + y = check_array(y, input_name="y", ensure_2d=False, dtype=None) + n_train, n_test = _validate_shuffle_split( + n_samples, + self.test_size, + self.train_size, + default_test_size=self._default_test_size, + ) + + if y.ndim == 2: + # for multi-label y, map each distinct row to a string repr + # using join because str(row) uses an ellipsis if len(row) > 1000 + y = np.array([" ".join(row.astype("str")) for row in y]) + + classes, y_indices = np.unique(y, return_inverse=True) + n_classes = classes.shape[0] + + class_counts = np.bincount(y_indices) + if np.min(class_counts) < 2: + raise ValueError( + "The least populated class in y has only 1" + " member, which is too few. The minimum" + " number of groups for any class cannot" + " be less than 2." 
+ ) + + if n_train < n_classes: + raise ValueError( + "The train_size = %d should be greater or " + "equal to the number of classes = %d" % (n_train, n_classes) + ) + if n_test < n_classes: + raise ValueError( + "The test_size = %d should be greater or " + "equal to the number of classes = %d" % (n_test, n_classes) + ) + + # Find the sorted list of instances for each class: + # (np.unique above performs a sort, so code is O(n logn) already) + class_indices = np.split( + np.argsort(y_indices, kind="mergesort"), np.cumsum(class_counts)[:-1] + ) + + rng = check_random_state(self.random_state) + + for _ in range(self.n_splits): + # if there are ties in the class-counts, we want + # to make sure to break them anew in each iteration + n_i = _approximate_mode(class_counts, n_train, rng) + class_counts_remaining = class_counts - n_i + t_i = _approximate_mode(class_counts_remaining, n_test, rng) + + train = [] + test = [] + + for i in range(n_classes): + permutation = rng.permutation(class_counts[i]) + perm_indices_class_i = class_indices[i].take(permutation, mode="clip") + + train.extend(perm_indices_class_i[: n_i[i]]) + test.extend(perm_indices_class_i[n_i[i] : n_i[i] + t_i[i]]) + + train = rng.permutation(train) + test = rng.permutation(test) + + yield train, test + + def split(self, X, y, groups=None): + """Generate indices to split data into training and test set. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples + and `n_features` is the number of features. + + Note that providing ``y`` is sufficient to generate the splits and + hence ``np.zeros(n_samples)`` may be used as a placeholder for + ``X`` instead of actual training data. + + y : array-like of shape (n_samples,) or (n_samples, n_labels) + The target variable for supervised learning problems. + Stratification is done based on the y labels. + + groups : object + Always ignored, exists for compatibility. + + Yields + ------ + train : ndarray + The training set indices for that split. + + test : ndarray + The testing set indices for that split. + + Notes + ----- + Randomized CV splitters may return different results for each call of + split. You can make the results identical by setting `random_state` + to an integer. + """ + y = check_array(y, input_name="y", ensure_2d=False, dtype=None) + return super().split(X, y, groups) + + +def _validate_shuffle_split(n_samples, test_size, train_size, default_test_size=None): + """ + Validation helper to check if the test/test sizes are meaningful w.r.t. the + size of the data (n_samples). 
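As an aside on the multilabel handling in `StratifiedShuffleSplit._iter_indices` above, each distinct row of a 2-D `y` is mapped to a string key before stratification; a minimal sketch with a made-up `y`:

import numpy as np

y = np.array([[0, 1], [0, 1], [1, 0]])  # made-up multilabel targets
y_as_str = np.array([" ".join(row.astype("str")) for row in y])
classes, y_indices = np.unique(y_as_str, return_inverse=True)
print(classes)    # ['0 1' '1 0']
print(y_indices)  # [0 0 1]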
+ """ + if test_size is None and train_size is None: + test_size = default_test_size + + test_size_type = np.asarray(test_size).dtype.kind + train_size_type = np.asarray(train_size).dtype.kind + + if ( + test_size_type == "i" + and (test_size >= n_samples or test_size <= 0) + or test_size_type == "f" + and (test_size <= 0 or test_size >= 1) + ): + raise ValueError( + "test_size={0} should be either positive and smaller" + " than the number of samples {1} or a float in the " + "(0, 1) range".format(test_size, n_samples) + ) + + if ( + train_size_type == "i" + and (train_size >= n_samples or train_size <= 0) + or train_size_type == "f" + and (train_size <= 0 or train_size >= 1) + ): + raise ValueError( + "train_size={0} should be either positive and smaller" + " than the number of samples {1} or a float in the " + "(0, 1) range".format(train_size, n_samples) + ) + + if train_size is not None and train_size_type not in ("i", "f"): + raise ValueError("Invalid value for train_size: {}".format(train_size)) + if test_size is not None and test_size_type not in ("i", "f"): + raise ValueError("Invalid value for test_size: {}".format(test_size)) + + if train_size_type == "f" and test_size_type == "f" and train_size + test_size > 1: + raise ValueError( + "The sum of test_size and train_size = {}, should be in the (0, 1)" + " range. Reduce test_size and/or train_size.".format(train_size + test_size) + ) + + if test_size_type == "f": + n_test = ceil(test_size * n_samples) + elif test_size_type == "i": + n_test = float(test_size) + + if train_size_type == "f": + n_train = floor(train_size * n_samples) + elif train_size_type == "i": + n_train = float(train_size) + + if train_size is None: + n_train = n_samples - n_test + elif test_size is None: + n_test = n_samples - n_train + + if n_train + n_test > n_samples: + raise ValueError( + "The sum of train_size and test_size = %d, " + "should be smaller than the number of " + "samples %d. Reduce test_size and/or " + "train_size." % (n_train + n_test, n_samples) + ) + + n_train, n_test = int(n_train), int(n_test) + + if n_train == 0: + raise ValueError( + "With n_samples={}, test_size={} and train_size={}, the " + "resulting train set will be empty. Adjust any of the " + "aforementioned parameters.".format(n_samples, test_size, train_size) + ) + + return n_train, n_test + + +class PredefinedSplit(BaseCrossValidator): + """Predefined split cross-validator. + + Provides train/test indices to split data into train/test sets using a + predefined scheme specified by the user with the ``test_fold`` parameter. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.16 + + Parameters + ---------- + test_fold : array-like of shape (n_samples,) + The entry ``test_fold[i]`` represents the index of the test set that + sample ``i`` belongs to. It is possible to exclude sample ``i`` from + any test set (i.e. include sample ``i`` in every training set) by + setting ``test_fold[i]`` equal to -1. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.model_selection import PredefinedSplit + >>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]]) + >>> y = np.array([0, 0, 1, 1]) + >>> test_fold = [0, 1, -1, 1] + >>> ps = PredefinedSplit(test_fold) + >>> ps.get_n_splits() + 2 + >>> print(ps) + PredefinedSplit(test_fold=array([ 0, 1, -1, 1])) + >>> for i, (train_index, test_index) in enumerate(ps.split()): + ... print(f"Fold {i}:") + ... print(f" Train: index={train_index}") + ... 
print(f" Test: index={test_index}") + Fold 0: + Train: index=[1 2 3] + Test: index=[0] + Fold 1: + Train: index=[0 2] + Test: index=[1 3] + """ + + def __init__(self, test_fold): + self.test_fold = np.array(test_fold, dtype=int) + self.test_fold = column_or_1d(self.test_fold) + self.unique_folds = np.unique(self.test_fold) + self.unique_folds = self.unique_folds[self.unique_folds != -1] + + def split(self, X=None, y=None, groups=None): + """Generate indices to split data into training and test set. + + Parameters + ---------- + X : object + Always ignored, exists for compatibility. + + y : object + Always ignored, exists for compatibility. + + groups : object + Always ignored, exists for compatibility. + + Yields + ------ + train : ndarray + The training set indices for that split. + + test : ndarray + The testing set indices for that split. + """ + ind = np.arange(len(self.test_fold)) + for test_index in self._iter_test_masks(): + train_index = ind[np.logical_not(test_index)] + test_index = ind[test_index] + yield train_index, test_index + + def _iter_test_masks(self): + """Generates boolean masks corresponding to test sets.""" + for f in self.unique_folds: + test_index = np.where(self.test_fold == f)[0] + test_mask = np.zeros(len(self.test_fold), dtype=bool) + test_mask[test_index] = True + yield test_mask + + def get_n_splits(self, X=None, y=None, groups=None): + """Returns the number of splitting iterations in the cross-validator. + + Parameters + ---------- + X : object + Always ignored, exists for compatibility. + + y : object + Always ignored, exists for compatibility. + + groups : object + Always ignored, exists for compatibility. + + Returns + ------- + n_splits : int + Returns the number of splitting iterations in the cross-validator. + """ + return len(self.unique_folds) + + +class _CVIterableWrapper(BaseCrossValidator): + """Wrapper class for old style cv objects and iterables.""" + + def __init__(self, cv): + self.cv = list(cv) + + def get_n_splits(self, X=None, y=None, groups=None): + """Returns the number of splitting iterations in the cross-validator. + + Parameters + ---------- + X : object + Always ignored, exists for compatibility. + + y : object + Always ignored, exists for compatibility. + + groups : object + Always ignored, exists for compatibility. + + Returns + ------- + n_splits : int + Returns the number of splitting iterations in the cross-validator. + """ + return len(self.cv) + + def split(self, X=None, y=None, groups=None): + """Generate indices to split data into training and test set. + + Parameters + ---------- + X : object + Always ignored, exists for compatibility. + + y : object + Always ignored, exists for compatibility. + + groups : object + Always ignored, exists for compatibility. + + Yields + ------ + train : ndarray + The training set indices for that split. + + test : ndarray + The testing set indices for that split. + """ + for train, test in self.cv: + yield train, test + + +def check_cv(cv=5, y=None, *, classifier=False): + """Input checker utility for building a cross-validator. + + Parameters + ---------- + cv : int, cross-validation generator or an iterable, default=None + Determines the cross-validation splitting strategy. + Possible inputs for cv are: + - None, to use the default 5-fold cross validation, + - integer, to specify the number of folds. + - :term:`CV splitter`, + - An iterable that generates (train, test) splits as arrays of indices. 
+ + For integer/None inputs, if classifier is True and ``y`` is either + binary or multiclass, :class:`StratifiedKFold` is used. In all other + cases, :class:`KFold` is used. + + Refer :ref:`User Guide ` for the various + cross-validation strategies that can be used here. + + .. versionchanged:: 0.22 + ``cv`` default value changed from 3-fold to 5-fold. + + y : array-like, default=None + The target variable for supervised learning problems. + + classifier : bool, default=False + Whether the task is a classification task, in which case + stratified KFold will be used. + + Returns + ------- + checked_cv : a cross-validator instance. + The return value is a cross-validator which generates the train/test + splits via the ``split`` method. + + Examples + -------- + >>> from sklearn.model_selection import check_cv + >>> check_cv(cv=5, y=None, classifier=False) + KFold(...) + >>> check_cv(cv=5, y=[1, 1, 0, 0, 0, 0], classifier=True) + StratifiedKFold(...) + """ + cv = 5 if cv is None else cv + if isinstance(cv, numbers.Integral): + if ( + classifier + and (y is not None) + and (type_of_target(y, input_name="y") in ("binary", "multiclass")) + ): + return StratifiedKFold(cv) + else: + return KFold(cv) + + if not hasattr(cv, "split") or isinstance(cv, str): + if not isinstance(cv, Iterable) or isinstance(cv, str): + raise ValueError( + "Expected cv as an integer, cross-validation " + "object (from sklearn.model_selection) " + "or an iterable. Got %s." % cv + ) + return _CVIterableWrapper(cv) + + return cv # New style cv objects are passed without any modification + + +@validate_params( + { + "test_size": [ + Interval(RealNotInt, 0, 1, closed="neither"), + Interval(numbers.Integral, 1, None, closed="left"), + None, + ], + "train_size": [ + Interval(RealNotInt, 0, 1, closed="neither"), + Interval(numbers.Integral, 1, None, closed="left"), + None, + ], + "random_state": ["random_state"], + "shuffle": ["boolean"], + "stratify": ["array-like", None], + }, + prefer_skip_nested_validation=True, +) +def train_test_split( + *arrays, + test_size=None, + train_size=None, + random_state=None, + shuffle=True, + stratify=None, +): + """Split arrays or matrices into random train and test subsets. + + Quick utility that wraps input validation, + ``next(ShuffleSplit().split(X, y))``, and application to input data + into a single call for splitting (and optionally subsampling) data into a + one-liner. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + *arrays : sequence of indexables with same length / shape[0] + Allowed inputs are lists, numpy arrays, scipy-sparse + matrices or pandas dataframes. + + test_size : float or int, default=None + If float, should be between 0.0 and 1.0 and represent the proportion + of the dataset to include in the test split. If int, represents the + absolute number of test samples. If None, the value is set to the + complement of the train size. If ``train_size`` is also None, it will + be set to 0.25. + + train_size : float or int, default=None + If float, should be between 0.0 and 1.0 and represent the + proportion of the dataset to include in the train split. If + int, represents the absolute number of train samples. If None, + the value is automatically set to the complement of the test size. + + random_state : int, RandomState instance or None, default=None + Controls the shuffling applied to the data before applying the split. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. 
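For intuition on how ``test_size``/``train_size`` are resolved into sample counts, here is a rough sketch using the private ``_validate_shuffle_split`` helper defined earlier (internal API shown purely for illustration; it may change between releases):

from sklearn.model_selection._split import _validate_shuffle_split

# A float test_size is ceil'ed to a count; the train side gets the complement.
print(_validate_shuffle_split(10, test_size=0.25, train_size=None,
                              default_test_size=0.25))   # -> (7, 3)

# Integer sizes are taken literally and must both fit inside n_samples.
print(_validate_shuffle_split(10, test_size=2, train_size=5,
                              default_test_size=0.25))   # -> (5, 2)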
+ + shuffle : bool, default=True + Whether or not to shuffle the data before splitting. If shuffle=False + then stratify must be None. + + stratify : array-like, default=None + If not None, data is split in a stratified fashion, using this as + the class labels. + Read more in the :ref:`User Guide `. + + Returns + ------- + splitting : list, length=2 * len(arrays) + List containing train-test split of inputs. + + .. versionadded:: 0.16 + If the input is sparse, the output will be a + ``scipy.sparse.csr_matrix``. Else, output type is the same as the + input type. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.model_selection import train_test_split + >>> X, y = np.arange(10).reshape((5, 2)), range(5) + >>> X + array([[0, 1], + [2, 3], + [4, 5], + [6, 7], + [8, 9]]) + >>> list(y) + [0, 1, 2, 3, 4] + + >>> X_train, X_test, y_train, y_test = train_test_split( + ... X, y, test_size=0.33, random_state=42) + ... + >>> X_train + array([[4, 5], + [0, 1], + [6, 7]]) + >>> y_train + [2, 0, 3] + >>> X_test + array([[2, 3], + [8, 9]]) + >>> y_test + [1, 4] + + >>> train_test_split(y, shuffle=False) + [[0, 1, 2], [3, 4]] + """ + n_arrays = len(arrays) + if n_arrays == 0: + raise ValueError("At least one array required as input") + + arrays = indexable(*arrays) + + n_samples = _num_samples(arrays[0]) + n_train, n_test = _validate_shuffle_split( + n_samples, test_size, train_size, default_test_size=0.25 + ) + + if shuffle is False: + if stratify is not None: + raise ValueError( + "Stratified train/test split is not implemented for shuffle=False" + ) + + train = np.arange(n_train) + test = np.arange(n_train, n_train + n_test) + + else: + if stratify is not None: + CVClass = StratifiedShuffleSplit + else: + CVClass = ShuffleSplit + + cv = CVClass(test_size=n_test, train_size=n_train, random_state=random_state) + + train, test = next(cv.split(X=arrays[0], y=stratify)) + + return list( + chain.from_iterable( + (_safe_indexing(a, train), _safe_indexing(a, test)) for a in arrays + ) + ) + + +# Tell nose that train_test_split is not a test. +# (Needed for external libraries that may use nose.) +# Use setattr to avoid mypy errors when monkeypatching. +setattr(train_test_split, "__test__", False) + + +def _pprint(params, offset=0, printer=repr): + """Pretty print the dictionary 'params' + + Parameters + ---------- + params : dict + The dictionary to pretty print + + offset : int, default=0 + The offset in characters to add at the begin of each line. + + printer : callable, default=repr + The function to convert entries to strings, typically + the builtin str or repr + + """ + # Do a multi-line justified repr: + options = np.get_printoptions() + np.set_printoptions(precision=5, threshold=64, edgeitems=2) + params_list = list() + this_line_length = offset + line_sep = ",\n" + (1 + offset // 2) * " " + for i, (k, v) in enumerate(sorted(params.items())): + if isinstance(v, float): + # use str for representing floating point numbers + # this way we get consistent representation across + # architectures and versions. + this_repr = "%s=%s" % (k, str(v)) + else: + # use repr of the rest + this_repr = "%s=%s" % (k, printer(v)) + if len(this_repr) > 500: + this_repr = this_repr[:300] + "..." 
+ this_repr[-100:] + if i > 0: + if this_line_length + len(this_repr) >= 75 or "\n" in this_repr: + params_list.append(line_sep) + this_line_length = len(line_sep) + else: + params_list.append(", ") + this_line_length += 2 + params_list.append(this_repr) + this_line_length += len(this_repr) + + np.set_printoptions(**options) + lines = "".join(params_list) + # Strip trailing space to avoid nightmare in doctests + lines = "\n".join(l.rstrip(" ") for l in lines.split("\n")) + return lines + + +def _build_repr(self): + # XXX This is copied from BaseEstimator's get_params + cls = self.__class__ + init = getattr(cls.__init__, "deprecated_original", cls.__init__) + # Ignore varargs, kw and default values and pop self + init_signature = signature(init) + # Consider the constructor parameters excluding 'self' + if init is object.__init__: + args = [] + else: + args = sorted( + [ + p.name + for p in init_signature.parameters.values() + if p.name != "self" and p.kind != p.VAR_KEYWORD + ] + ) + class_name = self.__class__.__name__ + params = dict() + for key in args: + # We need deprecation warnings to always be on in order to + # catch deprecated param values. + # This is set in utils/__init__.py but it gets overwritten + # when running under python3 somehow. + warnings.simplefilter("always", FutureWarning) + try: + with warnings.catch_warnings(record=True) as w: + value = getattr(self, key, None) + if value is None and hasattr(self, "cvargs"): + value = self.cvargs.get(key, None) + if len(w) and w[0].category == FutureWarning: + # if the parameter is deprecated, don't show it + continue + finally: + warnings.filters.pop(0) + params[key] = value + + return "%s(%s)" % (class_name, _pprint(params, offset=len(class_name))) + + +def _yields_constant_splits(cv): + # Return True if calling cv.split() always returns the same splits + # We assume that if a cv doesn't have a shuffle parameter, it shuffles by + # default (e.g. ShuffleSplit). If it actually doesn't shuffle (e.g. + # LeaveOneOut), then it won't have a random_state parameter anyway, in + # which case it will default to 0, leading to output=True + shuffle = getattr(cv, "shuffle", True) + random_state = getattr(cv, "random_state", 0) + return isinstance(random_state, numbers.Integral) or not shuffle diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/_validation.py b/env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/_validation.py new file mode 100644 index 0000000000000000000000000000000000000000..75c956f2d38a73229b8607d83c53913ab782e231 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/_validation.py @@ -0,0 +1,2360 @@ +""" +The :mod:`sklearn.model_selection._validation` module includes classes and +functions to validate the model. 
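A quick check of the ``_yields_constant_splits`` helper that closes ``_split.py`` above (private API, shown for illustration): splitters that never shuffle, or that shuffle with an integer seed, report constant splits, while an unseeded shuffler does not.

from sklearn.model_selection import KFold, ShuffleSplit
from sklearn.model_selection._split import _yields_constant_splits

print(_yields_constant_splits(KFold(n_splits=5)))             # -> True  (no shuffling)
print(_yields_constant_splits(ShuffleSplit(random_state=0)))  # -> True  (seeded shuffle)
print(_yields_constant_splits(ShuffleSplit()))                # -> False (unseeded shuffle)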
+""" + +# Author: Alexandre Gramfort +# Gael Varoquaux +# Olivier Grisel +# Raghav RV +# Michal Karbownik +# License: BSD 3 clause + + +import numbers +import time +import warnings +from collections import Counter +from contextlib import suppress +from functools import partial +from numbers import Real +from traceback import format_exc + +import numpy as np +import scipy.sparse as sp +from joblib import logger + +from ..base import clone, is_classifier +from ..exceptions import FitFailedWarning, UnsetMetadataPassedError +from ..metrics import check_scoring, get_scorer_names +from ..metrics._scorer import _check_multimetric_scoring, _MultimetricScorer +from ..preprocessing import LabelEncoder +from ..utils import Bunch, _safe_indexing, check_random_state, indexable +from ..utils._param_validation import ( + HasMethods, + Integral, + Interval, + StrOptions, + validate_params, +) +from ..utils.metadata_routing import ( + MetadataRouter, + MethodMapping, + _routing_enabled, + process_routing, +) +from ..utils.metaestimators import _safe_split +from ..utils.parallel import Parallel, delayed +from ..utils.validation import _check_method_params, _num_samples +from ._split import check_cv + +__all__ = [ + "cross_validate", + "cross_val_score", + "cross_val_predict", + "permutation_test_score", + "learning_curve", + "validation_curve", +] + + +def _check_params_groups_deprecation(fit_params, params, groups): + """A helper function to check deprecations on `groups` and `fit_params`. + + To be removed when set_config(enable_metadata_routing=False) is not possible. + """ + if params is not None and fit_params is not None: + raise ValueError( + "`params` and `fit_params` cannot both be provided. Pass parameters " + "via `params`. `fit_params` is deprecated and will be removed in " + "version 1.6." + ) + elif fit_params is not None: + warnings.warn( + ( + "`fit_params` is deprecated and will be removed in version 1.6. " + "Pass parameters via `params` instead." + ), + FutureWarning, + ) + params = fit_params + + params = {} if params is None else params + + if groups is not None and _routing_enabled(): + raise ValueError( + "`groups` can only be passed if metadata routing is not enabled via" + " `sklearn.set_config(enable_metadata_routing=True)`. When routing is" + " enabled, pass `groups` alongside other metadata via the `params` argument" + " instead." + ) + + return params + + +@validate_params( + { + "estimator": [HasMethods("fit")], + "X": ["array-like", "sparse matrix"], + "y": ["array-like", None], + "groups": ["array-like", None], + "scoring": [ + StrOptions(set(get_scorer_names())), + callable, + list, + tuple, + dict, + None, + ], + "cv": ["cv_object"], + "n_jobs": [Integral, None], + "verbose": ["verbose"], + "fit_params": [dict, None], + "params": [dict, None], + "pre_dispatch": [Integral, str], + "return_train_score": ["boolean"], + "return_estimator": ["boolean"], + "return_indices": ["boolean"], + "error_score": [StrOptions({"raise"}), Real], + }, + prefer_skip_nested_validation=False, # estimator is not validated yet +) +def cross_validate( + estimator, + X, + y=None, + *, + groups=None, + scoring=None, + cv=None, + n_jobs=None, + verbose=0, + fit_params=None, + params=None, + pre_dispatch="2*n_jobs", + return_train_score=False, + return_estimator=False, + return_indices=False, + error_score=np.nan, +): + """Evaluate metric(s) by cross-validation and also record fit/score times. + + Read more in the :ref:`User Guide `. 
+ + Parameters + ---------- + estimator : estimator object implementing 'fit' + The object to use to fit the data. + + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The data to fit. Can be for example a list, or an array. + + y : array-like of shape (n_samples,) or (n_samples, n_outputs), default=None + The target variable to try to predict in the case of + supervised learning. + + groups : array-like of shape (n_samples,), default=None + Group labels for the samples used while splitting the dataset into + train/test set. Only used in conjunction with a "Group" :term:`cv` + instance (e.g., :class:`GroupKFold`). + + .. versionchanged:: 1.4 + ``groups`` can only be passed if metadata routing is not enabled + via ``sklearn.set_config(enable_metadata_routing=True)``. When routing + is enabled, pass ``groups`` alongside other metadata via the ``params`` + argument instead. E.g.: + ``cross_validate(..., params={'groups': groups})``. + + scoring : str, callable, list, tuple, or dict, default=None + Strategy to evaluate the performance of the cross-validated model on + the test set. + + If `scoring` represents a single score, one can use: + + - a single string (see :ref:`scoring_parameter`); + - a callable (see :ref:`scoring`) that returns a single value. + + If `scoring` represents multiple scores, one can use: + + - a list or tuple of unique strings; + - a callable returning a dictionary where the keys are the metric + names and the values are the metric scores; + - a dictionary with metric names as keys and callables a values. + + See :ref:`multimetric_grid_search` for an example. + + cv : int, cross-validation generator or an iterable, default=None + Determines the cross-validation splitting strategy. + Possible inputs for cv are: + + - None, to use the default 5-fold cross validation, + - int, to specify the number of folds in a `(Stratified)KFold`, + - :term:`CV splitter`, + - An iterable yielding (train, test) splits as arrays of indices. + + For int/None inputs, if the estimator is a classifier and ``y`` is + either binary or multiclass, :class:`StratifiedKFold` is used. In all + other cases, :class:`KFold` is used. These splitters are instantiated + with `shuffle=False` so the splits will be the same across calls. + + Refer :ref:`User Guide ` for the various + cross-validation strategies that can be used here. + + .. versionchanged:: 0.22 + ``cv`` default value if None changed from 3-fold to 5-fold. + + n_jobs : int, default=None + Number of jobs to run in parallel. Training the estimator and computing + the score are parallelized over the cross-validation splits. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + verbose : int, default=0 + The verbosity level. + + fit_params : dict, default=None + Parameters to pass to the fit method of the estimator. + + .. deprecated:: 1.4 + This parameter is deprecated and will be removed in version 1.6. Use + ``params`` instead. + + params : dict, default=None + Parameters to pass to the underlying estimator's ``fit``, the scorer, + and the CV splitter. + + .. versionadded:: 1.4 + + pre_dispatch : int or str, default='2*n_jobs' + Controls the number of jobs that get dispatched during parallel + execution. Reducing this number can be useful to avoid an + explosion of memory consumption when more jobs get dispatched + than CPUs can process. 
This parameter can be: + + - An int, giving the exact number of total jobs that are + spawned + + - A str, giving an expression as a function of n_jobs, + as in '2*n_jobs' + + return_train_score : bool, default=False + Whether to include train scores. + Computing training scores is used to get insights on how different + parameter settings impact the overfitting/underfitting trade-off. + However computing the scores on the training set can be computationally + expensive and is not strictly required to select the parameters that + yield the best generalization performance. + + .. versionadded:: 0.19 + + .. versionchanged:: 0.21 + Default value was changed from ``True`` to ``False`` + + return_estimator : bool, default=False + Whether to return the estimators fitted on each split. + + .. versionadded:: 0.20 + + return_indices : bool, default=False + Whether to return the train-test indices selected for each split. + + .. versionadded:: 1.3 + + error_score : 'raise' or numeric, default=np.nan + Value to assign to the score if an error occurs in estimator fitting. + If set to 'raise', the error is raised. + If a numeric value is given, FitFailedWarning is raised. + + .. versionadded:: 0.20 + + Returns + ------- + scores : dict of float arrays of shape (n_splits,) + Array of scores of the estimator for each run of the cross validation. + + A dict of arrays containing the score/time arrays for each scorer is + returned. The possible keys for this ``dict`` are: + + ``test_score`` + The score array for test scores on each cv split. + Suffix ``_score`` in ``test_score`` changes to a specific + metric like ``test_r2`` or ``test_auc`` if there are + multiple scoring metrics in the scoring parameter. + ``train_score`` + The score array for train scores on each cv split. + Suffix ``_score`` in ``train_score`` changes to a specific + metric like ``train_r2`` or ``train_auc`` if there are + multiple scoring metrics in the scoring parameter. + This is available only if ``return_train_score`` parameter + is ``True``. + ``fit_time`` + The time for fitting the estimator on the train + set for each cv split. + ``score_time`` + The time for scoring the estimator on the test set for each + cv split. (Note time for scoring on the train set is not + included even if ``return_train_score`` is set to ``True`` + ``estimator`` + The estimator objects for each cv split. + This is available only if ``return_estimator`` parameter + is set to ``True``. + ``indices`` + The train/test positional indices for each cv split. A dictionary + is returned where the keys are either `"train"` or `"test"` + and the associated values are a list of integer-dtyped NumPy + arrays with the indices. Available only if `return_indices=True`. + + See Also + -------- + cross_val_score : Run cross-validation for single metric evaluation. + + cross_val_predict : Get predictions from each split of cross-validation for + diagnostic purposes. + + sklearn.metrics.make_scorer : Make a scorer from a performance metric or + loss function. 
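As a sketch of the extra result keys described above (assuming a scikit-learn version where ``return_indices`` is available, i.e. 1.3 or later):

from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_validate

X, y = load_iris(return_X_y=True)
res = cross_validate(LogisticRegression(max_iter=1000), X, y, cv=3,
                     return_estimator=True, return_indices=True)

print(sorted(res))            # -> ['estimator', 'fit_time', 'indices', 'score_time', 'test_score']
print(len(res["estimator"]))  # -> 3 fitted clones, one per split
print(res["indices"]["test"][0][:5])   # positional test indices of the first split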
+ + Examples + -------- + >>> from sklearn import datasets, linear_model + >>> from sklearn.model_selection import cross_validate + >>> from sklearn.metrics import make_scorer + >>> from sklearn.metrics import confusion_matrix + >>> from sklearn.svm import LinearSVC + >>> diabetes = datasets.load_diabetes() + >>> X = diabetes.data[:150] + >>> y = diabetes.target[:150] + >>> lasso = linear_model.Lasso() + + Single metric evaluation using ``cross_validate`` + + >>> cv_results = cross_validate(lasso, X, y, cv=3) + >>> sorted(cv_results.keys()) + ['fit_time', 'score_time', 'test_score'] + >>> cv_results['test_score'] + array([0.3315057 , 0.08022103, 0.03531816]) + + Multiple metric evaluation using ``cross_validate`` + (please refer the ``scoring`` parameter doc for more information) + + >>> scores = cross_validate(lasso, X, y, cv=3, + ... scoring=('r2', 'neg_mean_squared_error'), + ... return_train_score=True) + >>> print(scores['test_neg_mean_squared_error']) + [-3635.5... -3573.3... -6114.7...] + >>> print(scores['train_r2']) + [0.28009951 0.3908844 0.22784907] + """ + params = _check_params_groups_deprecation(fit_params, params, groups) + + X, y = indexable(X, y) + + cv = check_cv(cv, y, classifier=is_classifier(estimator)) + + if callable(scoring): + scorers = scoring + elif scoring is None or isinstance(scoring, str): + scorers = check_scoring(estimator, scoring) + else: + scorers = _check_multimetric_scoring(estimator, scoring) + + if _routing_enabled(): + # `cross_validate` will create a `_MultiMetricScorer` if `scoring` is a + # dict at a later stage. We need the same object for the purpose of + # routing. However, creating it here and passing it around would create + # a much larger diff since the dict is used in many places. + if isinstance(scorers, dict): + _scorer = _MultimetricScorer( + scorers=scorers, raise_exc=(error_score == "raise") + ) + else: + _scorer = scorers + # For estimators, a MetadataRouter is created in get_metadata_routing + # methods. For these router methods, we create the router to use + # `process_routing` on it. + router = ( + MetadataRouter(owner="cross_validate") + .add( + splitter=cv, + method_mapping=MethodMapping().add(caller="fit", callee="split"), + ) + .add( + estimator=estimator, + # TODO(SLEP6): also pass metadata to the predict method for + # scoring? + method_mapping=MethodMapping().add(caller="fit", callee="fit"), + ) + .add( + scorer=_scorer, + method_mapping=MethodMapping().add(caller="fit", callee="score"), + ) + ) + try: + routed_params = process_routing(router, "fit", **params) + except UnsetMetadataPassedError as e: + # The default exception would mention `fit` since in the above + # `process_routing` code, we pass `fit` as the caller. However, + # the user is not calling `fit` directly, so we change the message + # to make it more suitable for this case. + unrequested_params = sorted(e.unrequested_params) + raise UnsetMetadataPassedError( + message=( + f"{unrequested_params} are passed to cross validation but are not" + " explicitly set as requested or not requested for cross_validate's" + f" estimator: {estimator.__class__.__name__}. Call" + " `.set_fit_request({{metadata}}=True)` on the estimator for" + f" each metadata in {unrequested_params} that you" + " want to use and `metadata=False` for not using it. See the" + " Metadata Routing User guide" + " for more" + " information." 
+ ), + unrequested_params=e.unrequested_params, + routed_params=e.routed_params, + ) + else: + routed_params = Bunch() + routed_params.splitter = Bunch(split={"groups": groups}) + routed_params.estimator = Bunch(fit=params) + routed_params.scorer = Bunch(score={}) + + indices = cv.split(X, y, **routed_params.splitter.split) + if return_indices: + # materialize the indices since we need to store them in the returned dict + indices = list(indices) + + # We clone the estimator to make sure that all the folds are + # independent, and that it is pickle-able. + parallel = Parallel(n_jobs=n_jobs, verbose=verbose, pre_dispatch=pre_dispatch) + results = parallel( + delayed(_fit_and_score)( + clone(estimator), + X, + y, + scorer=scorers, + train=train, + test=test, + verbose=verbose, + parameters=None, + fit_params=routed_params.estimator.fit, + score_params=routed_params.scorer.score, + return_train_score=return_train_score, + return_times=True, + return_estimator=return_estimator, + error_score=error_score, + ) + for train, test in indices + ) + + _warn_or_raise_about_fit_failures(results, error_score) + + # For callable scoring, the return type is only know after calling. If the + # return type is a dictionary, the error scores can now be inserted with + # the correct key. + if callable(scoring): + _insert_error_scores(results, error_score) + + results = _aggregate_score_dicts(results) + + ret = {} + ret["fit_time"] = results["fit_time"] + ret["score_time"] = results["score_time"] + + if return_estimator: + ret["estimator"] = results["estimator"] + + if return_indices: + ret["indices"] = {} + ret["indices"]["train"], ret["indices"]["test"] = zip(*indices) + + test_scores_dict = _normalize_score_results(results["test_scores"]) + if return_train_score: + train_scores_dict = _normalize_score_results(results["train_scores"]) + + for name in test_scores_dict: + ret["test_%s" % name] = test_scores_dict[name] + if return_train_score: + key = "train_%s" % name + ret[key] = train_scores_dict[name] + + return ret + + +def _insert_error_scores(results, error_score): + """Insert error in `results` by replacing them inplace with `error_score`. + + This only applies to multimetric scores because `_fit_and_score` will + handle the single metric case. 
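To make the error-insertion step concrete, a hand-built ``results`` list (purely illustrative) shows how a failed fit's scores are back-filled once a successful multimetric result is available:

import numpy as np
from sklearn.model_selection._validation import _insert_error_scores

results = [
    {"fit_error": None, "test_scores": {"acc": 0.9, "f1": 0.8}},
    {"fit_error": "Traceback (most recent call last): ...", "test_scores": None},
]
_insert_error_scores(results, error_score=np.nan)
print(results[1]["test_scores"])   # -> {'acc': nan, 'f1': nan}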
+ """ + successful_score = None + failed_indices = [] + for i, result in enumerate(results): + if result["fit_error"] is not None: + failed_indices.append(i) + elif successful_score is None: + successful_score = result["test_scores"] + + if isinstance(successful_score, dict): + formatted_error = {name: error_score for name in successful_score} + for i in failed_indices: + results[i]["test_scores"] = formatted_error.copy() + if "train_scores" in results[i]: + results[i]["train_scores"] = formatted_error.copy() + + +def _normalize_score_results(scores, scaler_score_key="score"): + """Creates a scoring dictionary based on the type of `scores`""" + if isinstance(scores[0], dict): + # multimetric scoring + return _aggregate_score_dicts(scores) + # scaler + return {scaler_score_key: scores} + + +def _warn_or_raise_about_fit_failures(results, error_score): + fit_errors = [ + result["fit_error"] for result in results if result["fit_error"] is not None + ] + if fit_errors: + num_failed_fits = len(fit_errors) + num_fits = len(results) + fit_errors_counter = Counter(fit_errors) + delimiter = "-" * 80 + "\n" + fit_errors_summary = "\n".join( + f"{delimiter}{n} fits failed with the following error:\n{error}" + for error, n in fit_errors_counter.items() + ) + + if num_failed_fits == num_fits: + all_fits_failed_message = ( + f"\nAll the {num_fits} fits failed.\n" + "It is very likely that your model is misconfigured.\n" + "You can try to debug the error by setting error_score='raise'.\n\n" + f"Below are more details about the failures:\n{fit_errors_summary}" + ) + raise ValueError(all_fits_failed_message) + + else: + some_fits_failed_message = ( + f"\n{num_failed_fits} fits failed out of a total of {num_fits}.\n" + "The score on these train-test partitions for these parameters" + f" will be set to {error_score}.\n" + "If these failures are not expected, you can try to debug them " + "by setting error_score='raise'.\n\n" + f"Below are more details about the failures:\n{fit_errors_summary}" + ) + warnings.warn(some_fits_failed_message, FitFailedWarning) + + +@validate_params( + { + "estimator": [HasMethods("fit")], + "X": ["array-like", "sparse matrix"], + "y": ["array-like", None], + "groups": ["array-like", None], + "scoring": [StrOptions(set(get_scorer_names())), callable, None], + "cv": ["cv_object"], + "n_jobs": [Integral, None], + "verbose": ["verbose"], + "fit_params": [dict, None], + "params": [dict, None], + "pre_dispatch": [Integral, str, None], + "error_score": [StrOptions({"raise"}), Real], + }, + prefer_skip_nested_validation=False, # estimator is not validated yet +) +def cross_val_score( + estimator, + X, + y=None, + *, + groups=None, + scoring=None, + cv=None, + n_jobs=None, + verbose=0, + fit_params=None, + params=None, + pre_dispatch="2*n_jobs", + error_score=np.nan, +): + """Evaluate a score by cross-validation. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + estimator : estimator object implementing 'fit' + The object to use to fit the data. + + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The data to fit. Can be for example a list, or an array. + + y : array-like of shape (n_samples,) or (n_samples, n_outputs), \ + default=None + The target variable to try to predict in the case of + supervised learning. + + groups : array-like of shape (n_samples,), default=None + Group labels for the samples used while splitting the dataset into + train/test set. Only used in conjunction with a "Group" :term:`cv` + instance (e.g., :class:`GroupKFold`). 
+ + .. versionchanged:: 1.4 + ``groups`` can only be passed if metadata routing is not enabled + via ``sklearn.set_config(enable_metadata_routing=True)``. When routing + is enabled, pass ``groups`` alongside other metadata via the ``params`` + argument instead. E.g.: + ``cross_val_score(..., params={'groups': groups})``. + + scoring : str or callable, default=None + A str (see model evaluation documentation) or + a scorer callable object / function with signature + ``scorer(estimator, X, y)`` which should return only + a single value. + + Similar to :func:`cross_validate` + but only a single metric is permitted. + + If `None`, the estimator's default scorer (if available) is used. + + cv : int, cross-validation generator or an iterable, default=None + Determines the cross-validation splitting strategy. + Possible inputs for cv are: + + - `None`, to use the default 5-fold cross validation, + - int, to specify the number of folds in a `(Stratified)KFold`, + - :term:`CV splitter`, + - An iterable that generates (train, test) splits as arrays of indices. + + For `int`/`None` inputs, if the estimator is a classifier and `y` is + either binary or multiclass, :class:`StratifiedKFold` is used. In all + other cases, :class:`KFold` is used. These splitters are instantiated + with `shuffle=False` so the splits will be the same across calls. + + Refer :ref:`User Guide ` for the various + cross-validation strategies that can be used here. + + .. versionchanged:: 0.22 + `cv` default value if `None` changed from 3-fold to 5-fold. + + n_jobs : int, default=None + Number of jobs to run in parallel. Training the estimator and computing + the score are parallelized over the cross-validation splits. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + verbose : int, default=0 + The verbosity level. + + fit_params : dict, default=None + Parameters to pass to the fit method of the estimator. + + .. deprecated:: 1.4 + This parameter is deprecated and will be removed in version 1.6. Use + ``params`` instead. + + params : dict, default=None + Parameters to pass to the underlying estimator's ``fit``, the scorer, + and the CV splitter. + + .. versionadded:: 1.4 + + pre_dispatch : int or str, default='2*n_jobs' + Controls the number of jobs that get dispatched during parallel + execution. Reducing this number can be useful to avoid an + explosion of memory consumption when more jobs get dispatched + than CPUs can process. This parameter can be: + + - ``None``, in which case all the jobs are immediately + created and spawned. Use this for lightweight and + fast-running jobs, to avoid delays due to on-demand + spawning of the jobs + + - An int, giving the exact number of total jobs that are + spawned + + - A str, giving an expression as a function of n_jobs, + as in '2*n_jobs' + + error_score : 'raise' or numeric, default=np.nan + Value to assign to the score if an error occurs in estimator fitting. + If set to 'raise', the error is raised. + If a numeric value is given, FitFailedWarning is raised. + + .. versionadded:: 0.20 + + Returns + ------- + scores : ndarray of float of shape=(len(list(cv)),) + Array of scores of the estimator for each run of the cross validation. + + See Also + -------- + cross_validate : To run cross-validation on multiple metrics and also to + return train scores, fit times and score times. + + cross_val_predict : Get predictions from each split of cross-validation for + diagnostic purposes. 
+ + sklearn.metrics.make_scorer : Make a scorer from a performance metric or + loss function. + + Examples + -------- + >>> from sklearn import datasets, linear_model + >>> from sklearn.model_selection import cross_val_score + >>> diabetes = datasets.load_diabetes() + >>> X = diabetes.data[:150] + >>> y = diabetes.target[:150] + >>> lasso = linear_model.Lasso() + >>> print(cross_val_score(lasso, X, y, cv=3)) + [0.3315057 0.08022103 0.03531816] + """ + # To ensure multimetric format is not supported + scorer = check_scoring(estimator, scoring=scoring) + + cv_results = cross_validate( + estimator=estimator, + X=X, + y=y, + groups=groups, + scoring={"score": scorer}, + cv=cv, + n_jobs=n_jobs, + verbose=verbose, + fit_params=fit_params, + params=params, + pre_dispatch=pre_dispatch, + error_score=error_score, + ) + return cv_results["test_score"] + + +def _fit_and_score( + estimator, + X, + y, + *, + scorer, + train, + test, + verbose, + parameters, + fit_params, + score_params, + return_train_score=False, + return_parameters=False, + return_n_test_samples=False, + return_times=False, + return_estimator=False, + split_progress=None, + candidate_progress=None, + error_score=np.nan, +): + """Fit estimator and compute scores for a given dataset split. + + Parameters + ---------- + estimator : estimator object implementing 'fit' + The object to use to fit the data. + + X : array-like of shape (n_samples, n_features) + The data to fit. + + y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None + The target variable to try to predict in the case of + supervised learning. + + scorer : A single callable or dict mapping scorer name to the callable + If it is a single callable, the return value for ``train_scores`` and + ``test_scores`` is a single float. + + For a dict, it should be one mapping the scorer name to the scorer + callable object / function. + + The callable object / fn should have signature + ``scorer(estimator, X, y)``. + + train : array-like of shape (n_train_samples,) + Indices of training samples. + + test : array-like of shape (n_test_samples,) + Indices of test samples. + + verbose : int + The verbosity level. + + error_score : 'raise' or numeric, default=np.nan + Value to assign to the score if an error occurs in estimator fitting. + If set to 'raise', the error is raised. + If a numeric value is given, FitFailedWarning is raised. + + parameters : dict or None + Parameters to be set on the estimator. + + fit_params : dict or None + Parameters that will be passed to ``estimator.fit``. + + score_params : dict or None + Parameters that will be passed to the scorer. + + return_train_score : bool, default=False + Compute and return score on training set. + + return_parameters : bool, default=False + Return parameters that has been used for the estimator. + + split_progress : {list, tuple} of int, default=None + A list or tuple of format (, ). + + candidate_progress : {list, tuple} of int, default=None + A list or tuple of format + (, ). + + return_n_test_samples : bool, default=False + Whether to return the ``n_test_samples``. + + return_times : bool, default=False + Whether to return the fit/score times. + + return_estimator : bool, default=False + Whether to return the fitted estimator. + + Returns + ------- + result : dict with the following attributes + train_scores : dict of scorer name -> float + Score on training set (for all the scorers), + returned only if `return_train_score` is `True`. 
+ test_scores : dict of scorer name -> float + Score on testing set (for all the scorers). + n_test_samples : int + Number of test samples. + fit_time : float + Time spent for fitting in seconds. + score_time : float + Time spent for scoring in seconds. + parameters : dict or None + The parameters that have been evaluated. + estimator : estimator object + The fitted estimator. + fit_error : str or None + Traceback str if the fit failed, None if the fit succeeded. + """ + if not isinstance(error_score, numbers.Number) and error_score != "raise": + raise ValueError( + "error_score must be the string 'raise' or a numeric value. " + "(Hint: if using 'raise', please make sure that it has been " + "spelled correctly.)" + ) + + progress_msg = "" + if verbose > 2: + if split_progress is not None: + progress_msg = f" {split_progress[0]+1}/{split_progress[1]}" + if candidate_progress and verbose > 9: + progress_msg += f"; {candidate_progress[0]+1}/{candidate_progress[1]}" + + if verbose > 1: + if parameters is None: + params_msg = "" + else: + sorted_keys = sorted(parameters) # Ensure deterministic o/p + params_msg = ", ".join(f"{k}={parameters[k]}" for k in sorted_keys) + if verbose > 9: + start_msg = f"[CV{progress_msg}] START {params_msg}" + print(f"{start_msg}{(80 - len(start_msg)) * '.'}") + + # Adjust length of sample weights + fit_params = fit_params if fit_params is not None else {} + fit_params = _check_method_params(X, params=fit_params, indices=train) + score_params = score_params if score_params is not None else {} + score_params_train = _check_method_params(X, params=score_params, indices=train) + score_params_test = _check_method_params(X, params=score_params, indices=test) + + if parameters is not None: + # here we clone the parameters, since sometimes the parameters + # themselves might be estimators, e.g. when we search over different + # estimators in a pipeline. 
+ # ref: https://github.com/scikit-learn/scikit-learn/pull/26786 + estimator = estimator.set_params(**clone(parameters, safe=False)) + + start_time = time.time() + + X_train, y_train = _safe_split(estimator, X, y, train) + X_test, y_test = _safe_split(estimator, X, y, test, train) + + result = {} + try: + if y_train is None: + estimator.fit(X_train, **fit_params) + else: + estimator.fit(X_train, y_train, **fit_params) + + except Exception: + # Note fit time as time until error + fit_time = time.time() - start_time + score_time = 0.0 + if error_score == "raise": + raise + elif isinstance(error_score, numbers.Number): + if isinstance(scorer, dict): + test_scores = {name: error_score for name in scorer} + if return_train_score: + train_scores = test_scores.copy() + else: + test_scores = error_score + if return_train_score: + train_scores = error_score + result["fit_error"] = format_exc() + else: + result["fit_error"] = None + + fit_time = time.time() - start_time + test_scores = _score( + estimator, X_test, y_test, scorer, score_params_test, error_score + ) + score_time = time.time() - start_time - fit_time + if return_train_score: + train_scores = _score( + estimator, X_train, y_train, scorer, score_params_train, error_score + ) + + if verbose > 1: + total_time = score_time + fit_time + end_msg = f"[CV{progress_msg}] END " + result_msg = params_msg + (";" if params_msg else "") + if verbose > 2: + if isinstance(test_scores, dict): + for scorer_name in sorted(test_scores): + result_msg += f" {scorer_name}: (" + if return_train_score: + scorer_scores = train_scores[scorer_name] + result_msg += f"train={scorer_scores:.3f}, " + result_msg += f"test={test_scores[scorer_name]:.3f})" + else: + result_msg += ", score=" + if return_train_score: + result_msg += f"(train={train_scores:.3f}, test={test_scores:.3f})" + else: + result_msg += f"{test_scores:.3f}" + result_msg += f" total time={logger.short_format_time(total_time)}" + + # Right align the result_msg + end_msg += "." * (80 - len(end_msg) - len(result_msg)) + end_msg += result_msg + print(end_msg) + + result["test_scores"] = test_scores + if return_train_score: + result["train_scores"] = train_scores + if return_n_test_samples: + result["n_test_samples"] = _num_samples(X_test) + if return_times: + result["fit_time"] = fit_time + result["score_time"] = score_time + if return_parameters: + result["parameters"] = parameters + if return_estimator: + result["estimator"] = estimator + return result + + +def _score(estimator, X_test, y_test, scorer, score_params, error_score="raise"): + """Compute the score(s) of an estimator on a given test set. + + Will return a dict of floats if `scorer` is a dict, otherwise a single + float is returned. + """ + if isinstance(scorer, dict): + # will cache method calls if needed. scorer() returns a dict + scorer = _MultimetricScorer(scorers=scorer, raise_exc=(error_score == "raise")) + + score_params = {} if score_params is None else score_params + + try: + if y_test is None: + scores = scorer(estimator, X_test, **score_params) + else: + scores = scorer(estimator, X_test, y_test, **score_params) + except Exception: + if isinstance(scorer, _MultimetricScorer): + # If `_MultimetricScorer` raises exception, the `error_score` + # parameter is equal to "raise". + raise + else: + if error_score == "raise": + raise + else: + scores = error_score + warnings.warn( + ( + "Scoring failed. The score on this train-test partition for " + f"these parameters will be set to {error_score}. 
Details: \n" + f"{format_exc()}" + ), + UserWarning, + ) + + # Check non-raised error messages in `_MultimetricScorer` + if isinstance(scorer, _MultimetricScorer): + exception_messages = [ + (name, str_e) for name, str_e in scores.items() if isinstance(str_e, str) + ] + if exception_messages: + # error_score != "raise" + for name, str_e in exception_messages: + scores[name] = error_score + warnings.warn( + ( + "Scoring failed. The score on this train-test partition for " + f"these parameters will be set to {error_score}. Details: \n" + f"{str_e}" + ), + UserWarning, + ) + + error_msg = "scoring must return a number, got %s (%s) instead. (scorer=%s)" + if isinstance(scores, dict): + for name, score in scores.items(): + if hasattr(score, "item"): + with suppress(ValueError): + # e.g. unwrap memmapped scalars + score = score.item() + if not isinstance(score, numbers.Number): + raise ValueError(error_msg % (score, type(score), name)) + scores[name] = score + else: # scalar + if hasattr(scores, "item"): + with suppress(ValueError): + # e.g. unwrap memmapped scalars + scores = scores.item() + if not isinstance(scores, numbers.Number): + raise ValueError(error_msg % (scores, type(scores), scorer)) + return scores + + +@validate_params( + { + "estimator": [HasMethods(["fit", "predict"])], + "X": ["array-like", "sparse matrix"], + "y": ["array-like", None], + "groups": ["array-like", None], + "cv": ["cv_object"], + "n_jobs": [Integral, None], + "verbose": ["verbose"], + "fit_params": [dict, None], + "params": [dict, None], + "pre_dispatch": [Integral, str, None], + "method": [ + StrOptions( + { + "predict", + "predict_proba", + "predict_log_proba", + "decision_function", + } + ) + ], + }, + prefer_skip_nested_validation=False, # estimator is not validated yet +) +def cross_val_predict( + estimator, + X, + y=None, + *, + groups=None, + cv=None, + n_jobs=None, + verbose=0, + fit_params=None, + params=None, + pre_dispatch="2*n_jobs", + method="predict", +): + """Generate cross-validated estimates for each input data point. + + The data is split according to the cv parameter. Each sample belongs + to exactly one test set, and its prediction is computed with an + estimator fitted on the corresponding training set. + + Passing these predictions into an evaluation metric may not be a valid + way to measure generalization performance. Results can differ from + :func:`cross_validate` and :func:`cross_val_score` unless all tests sets + have equal size and the metric decomposes over samples. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + estimator : estimator + The estimator instance to use to fit the data. It must implement a `fit` + method and the method given by the `method` parameter. + + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The data to fit. Can be, for example a list, or an array at least 2d. + + y : array-like of shape (n_samples,) or (n_samples, n_outputs), \ + default=None + The target variable to try to predict in the case of + supervised learning. + + groups : array-like of shape (n_samples,), default=None + Group labels for the samples used while splitting the dataset into + train/test set. Only used in conjunction with a "Group" :term:`cv` + instance (e.g., :class:`GroupKFold`). + + .. versionchanged:: 1.4 + ``groups`` can only be passed if metadata routing is not enabled + via ``sklearn.set_config(enable_metadata_routing=True)``. When routing + is enabled, pass ``groups`` alongside other metadata via the ``params`` + argument instead. 
E.g.: + ``cross_val_predict(..., params={'groups': groups})``. + + cv : int, cross-validation generator or an iterable, default=None + Determines the cross-validation splitting strategy. + Possible inputs for cv are: + + - None, to use the default 5-fold cross validation, + - int, to specify the number of folds in a `(Stratified)KFold`, + - :term:`CV splitter`, + - An iterable that generates (train, test) splits as arrays of indices. + + For int/None inputs, if the estimator is a classifier and ``y`` is + either binary or multiclass, :class:`StratifiedKFold` is used. In all + other cases, :class:`KFold` is used. These splitters are instantiated + with `shuffle=False` so the splits will be the same across calls. + + Refer :ref:`User Guide ` for the various + cross-validation strategies that can be used here. + + .. versionchanged:: 0.22 + ``cv`` default value if None changed from 3-fold to 5-fold. + + n_jobs : int, default=None + Number of jobs to run in parallel. Training the estimator and + predicting are parallelized over the cross-validation splits. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + verbose : int, default=0 + The verbosity level. + + fit_params : dict, default=None + Parameters to pass to the fit method of the estimator. + + .. deprecated:: 1.4 + This parameter is deprecated and will be removed in version 1.6. Use + ``params`` instead. + + params : dict, default=None + Parameters to pass to the underlying estimator's ``fit`` and the CV + splitter. + + .. versionadded:: 1.4 + + pre_dispatch : int or str, default='2*n_jobs' + Controls the number of jobs that get dispatched during parallel + execution. Reducing this number can be useful to avoid an + explosion of memory consumption when more jobs get dispatched + than CPUs can process. This parameter can be: + + - None, in which case all the jobs are immediately + created and spawned. Use this for lightweight and + fast-running jobs, to avoid delays due to on-demand + spawning of the jobs + + - An int, giving the exact number of total jobs that are + spawned + + - A str, giving an expression as a function of n_jobs, + as in '2*n_jobs' + + method : {'predict', 'predict_proba', 'predict_log_proba', \ + 'decision_function'}, default='predict' + The method to be invoked by `estimator`. + + Returns + ------- + predictions : ndarray + This is the result of calling `method`. Shape: + + - When `method` is 'predict' and in special case where `method` is + 'decision_function' and the target is binary: (n_samples,) + - When `method` is one of {'predict_proba', 'predict_log_proba', + 'decision_function'} (unless special case above): + (n_samples, n_classes) + - If `estimator` is :term:`multioutput`, an extra dimension + 'n_outputs' is added to the end of each shape above. + + See Also + -------- + cross_val_score : Calculate score for each CV split. + cross_validate : Calculate one or more scores and timings for each CV + split. + + Notes + ----- + In the case that one or more classes are absent in a training portion, a + default score needs to be assigned to all instances for that class if + ``method`` produces columns per class, as in {'decision_function', + 'predict_proba', 'predict_log_proba'}. For ``predict_proba`` this value is + 0. In order to ensure finite output, we approximate negative infinity by + the minimum finite float value for the dtype in other cases. 
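A short sketch of the column-per-class case just described (iris has 3 classes, so each prediction row has 3 probability columns):

from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_predict

X, y = load_iris(return_X_y=True)
proba = cross_val_predict(LogisticRegression(max_iter=1000), X, y,
                          cv=5, method="predict_proba")
print(proba.shape)             # -> (150, 3): one row per sample, one column per class
print(proba[:2].sum(axis=1))   # rows sum to 1, as with an ordinary predict_proba call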
+ + Examples + -------- + >>> from sklearn import datasets, linear_model + >>> from sklearn.model_selection import cross_val_predict + >>> diabetes = datasets.load_diabetes() + >>> X = diabetes.data[:150] + >>> y = diabetes.target[:150] + >>> lasso = linear_model.Lasso() + >>> y_pred = cross_val_predict(lasso, X, y, cv=3) + """ + params = _check_params_groups_deprecation(fit_params, params, groups) + X, y = indexable(X, y) + + if _routing_enabled(): + # For estimators, a MetadataRouter is created in get_metadata_routing + # methods. For these router methods, we create the router to use + # `process_routing` on it. + router = ( + MetadataRouter(owner="cross_validate") + .add( + splitter=cv, + method_mapping=MethodMapping().add(caller="fit", callee="split"), + ) + .add( + estimator=estimator, + # TODO(SLEP6): also pass metadata for the predict method. + method_mapping=MethodMapping().add(caller="fit", callee="fit"), + ) + ) + try: + routed_params = process_routing(router, "fit", **params) + except UnsetMetadataPassedError as e: + # The default exception would mention `fit` since in the above + # `process_routing` code, we pass `fit` as the caller. However, + # the user is not calling `fit` directly, so we change the message + # to make it more suitable for this case. + unrequested_params = sorted(e.unrequested_params) + raise UnsetMetadataPassedError( + message=( + f"{unrequested_params} are passed to `cross_val_predict` but are" + " not explicitly set as requested or not requested for" + f" cross_validate's estimator: {estimator.__class__.__name__} Call" + " `.set_fit_request({{metadata}}=True)` on the estimator for" + f" each metadata in {unrequested_params} that you want to use and" + " `metadata=False` for not using it. See the Metadata Routing User" + " guide " + " for more information." + ), + unrequested_params=e.unrequested_params, + routed_params=e.routed_params, + ) + else: + routed_params = Bunch() + routed_params.splitter = Bunch(split={"groups": groups}) + routed_params.estimator = Bunch(fit=params) + + cv = check_cv(cv, y, classifier=is_classifier(estimator)) + splits = list(cv.split(X, y, **routed_params.splitter.split)) + + test_indices = np.concatenate([test for _, test in splits]) + if not _check_is_permutation(test_indices, _num_samples(X)): + raise ValueError("cross_val_predict only works for partitions") + + # If classification methods produce multiple columns of output, + # we need to manually encode classes to ensure consistent column ordering. + encode = ( + method in ["decision_function", "predict_proba", "predict_log_proba"] + and y is not None + ) + if encode: + y = np.asarray(y) + if y.ndim == 1: + le = LabelEncoder() + y = le.fit_transform(y) + elif y.ndim == 2: + y_enc = np.zeros_like(y, dtype=int) + for i_label in range(y.shape[1]): + y_enc[:, i_label] = LabelEncoder().fit_transform(y[:, i_label]) + y = y_enc + + # We clone the estimator to make sure that all the folds are + # independent, and that it is pickle-able. 
+ parallel = Parallel(n_jobs=n_jobs, verbose=verbose, pre_dispatch=pre_dispatch) + predictions = parallel( + delayed(_fit_and_predict)( + clone(estimator), + X, + y, + train, + test, + routed_params.estimator.fit, + method, + ) + for train, test in splits + ) + + inv_test_indices = np.empty(len(test_indices), dtype=int) + inv_test_indices[test_indices] = np.arange(len(test_indices)) + + if sp.issparse(predictions[0]): + predictions = sp.vstack(predictions, format=predictions[0].format) + elif encode and isinstance(predictions[0], list): + # `predictions` is a list of method outputs from each fold. + # If each of those is also a list, then treat this as a + # multioutput-multiclass task. We need to separately concatenate + # the method outputs for each label into an `n_labels` long list. + n_labels = y.shape[1] + concat_pred = [] + for i_label in range(n_labels): + label_preds = np.concatenate([p[i_label] for p in predictions]) + concat_pred.append(label_preds) + predictions = concat_pred + else: + predictions = np.concatenate(predictions) + + if isinstance(predictions, list): + return [p[inv_test_indices] for p in predictions] + else: + return predictions[inv_test_indices] + + +def _fit_and_predict(estimator, X, y, train, test, fit_params, method): + """Fit estimator and predict values for a given dataset split. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + estimator : estimator object implementing 'fit' and 'predict' + The object to use to fit the data. + + X : array-like of shape (n_samples, n_features) + The data to fit. + + .. versionchanged:: 0.20 + X is only required to be an object with finite length or shape now + + y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None + The target variable to try to predict in the case of + supervised learning. + + train : array-like of shape (n_train_samples,) + Indices of training samples. + + test : array-like of shape (n_test_samples,) + Indices of test samples. + + fit_params : dict or None + Parameters that will be passed to ``estimator.fit``. + + method : str + Invokes the passed method name of the passed estimator. 
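A rough, hypothetical standalone call to ``_fit_and_predict`` (in practice only ``cross_val_predict`` invokes it; shown only to illustrate the train/test plumbing described above):

from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import KFold
from sklearn.model_selection._validation import _fit_and_predict

X, y = load_iris(return_X_y=True)
train, test = next(KFold(n_splits=3, shuffle=True, random_state=0).split(X))

preds = _fit_and_predict(LogisticRegression(max_iter=1000), X, y,
                         train, test, fit_params=None, method="predict")
print(preds.shape)   # -> (50,): one prediction per test index, in test order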
+ + Returns + ------- + predictions : sequence + Result of calling 'estimator.method' + """ + # Adjust length of sample weights + fit_params = fit_params if fit_params is not None else {} + fit_params = _check_method_params(X, params=fit_params, indices=train) + + X_train, y_train = _safe_split(estimator, X, y, train) + X_test, _ = _safe_split(estimator, X, y, test, train) + + if y_train is None: + estimator.fit(X_train, **fit_params) + else: + estimator.fit(X_train, y_train, **fit_params) + func = getattr(estimator, method) + predictions = func(X_test) + + encode = ( + method in ["decision_function", "predict_proba", "predict_log_proba"] + and y is not None + ) + + if encode: + if isinstance(predictions, list): + predictions = [ + _enforce_prediction_order( + estimator.classes_[i_label], + predictions[i_label], + n_classes=len(set(y[:, i_label])), + method=method, + ) + for i_label in range(len(predictions)) + ] + else: + # A 2D y array should be a binary label indicator matrix + n_classes = len(set(y)) if y.ndim == 1 else y.shape[1] + predictions = _enforce_prediction_order( + estimator.classes_, predictions, n_classes, method + ) + return predictions + + +def _enforce_prediction_order(classes, predictions, n_classes, method): + """Ensure that prediction arrays have correct column order + + When doing cross-validation, if one or more classes are + not present in the subset of data used for training, + then the output prediction array might not have the same + columns as other folds. Use the list of class names + (assumed to be ints) to enforce the correct column order. + + Note that `classes` is the list of classes in this fold + (a subset of the classes in the full training set) + and `n_classes` is the number of classes in the full training set. + """ + if n_classes != len(classes): + recommendation = ( + "To fix this, use a cross-validation " + "technique resulting in properly " + "stratified folds" + ) + warnings.warn( + "Number of classes in training fold ({}) does " + "not match total number of classes ({}). " + "Results may not be appropriate for your use case. " + "{}".format(len(classes), n_classes, recommendation), + RuntimeWarning, + ) + if method == "decision_function": + if predictions.ndim == 2 and predictions.shape[1] != len(classes): + # This handles the case when the shape of predictions + # does not match the number of classes used to train + # it with. This case is found when sklearn.svm.SVC is + # set to `decision_function_shape='ovo'`. + raise ValueError( + "Output shape {} of {} does not match " + "number of classes ({}) in fold. " + "Irregular decision_function outputs " + "are not currently supported by " + "cross_val_predict".format(predictions.shape, method, len(classes)) + ) + if len(classes) <= 2: + # In this special case, `predictions` contains a 1D array. + raise ValueError( + "Only {} class/es in training fold, but {} " + "in overall dataset. This " + "is not supported for decision_function " + "with imbalanced folds. 
{}".format( + len(classes), n_classes, recommendation + ) + ) + + float_min = np.finfo(predictions.dtype).min + default_values = { + "decision_function": float_min, + "predict_log_proba": float_min, + "predict_proba": 0, + } + predictions_for_all_classes = np.full( + (_num_samples(predictions), n_classes), + default_values[method], + dtype=predictions.dtype, + ) + predictions_for_all_classes[:, classes] = predictions + predictions = predictions_for_all_classes + return predictions + + +def _check_is_permutation(indices, n_samples): + """Check whether indices is a reordering of the array np.arange(n_samples) + + Parameters + ---------- + indices : ndarray + int array to test + n_samples : int + number of expected elements + + Returns + ------- + is_partition : bool + True iff sorted(indices) is np.arange(n) + """ + if len(indices) != n_samples: + return False + hit = np.zeros(n_samples, dtype=bool) + hit[indices] = True + if not np.all(hit): + return False + return True + + +@validate_params( + { + "estimator": [HasMethods("fit")], + "X": ["array-like", "sparse matrix"], + "y": ["array-like", None], + "groups": ["array-like", None], + "cv": ["cv_object"], + "n_permutations": [Interval(Integral, 1, None, closed="left")], + "n_jobs": [Integral, None], + "random_state": ["random_state"], + "verbose": ["verbose"], + "scoring": [StrOptions(set(get_scorer_names())), callable, None], + "fit_params": [dict, None], + }, + prefer_skip_nested_validation=False, # estimator is not validated yet +) +def permutation_test_score( + estimator, + X, + y, + *, + groups=None, + cv=None, + n_permutations=100, + n_jobs=None, + random_state=0, + verbose=0, + scoring=None, + fit_params=None, +): + """Evaluate the significance of a cross-validated score with permutations. + + Permutes targets to generate 'randomized data' and compute the empirical + p-value against the null hypothesis that features and targets are + independent. + + The p-value represents the fraction of randomized data sets where the + estimator performed as well or better than in the original data. A small + p-value suggests that there is a real dependency between features and + targets which has been used by the estimator to give good predictions. + A large p-value may be due to lack of real dependency between features + and targets or the estimator was not able to use the dependency to + give good predictions. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + estimator : estimator object implementing 'fit' + The object to use to fit the data. + + X : array-like of shape at least 2D + The data to fit. + + y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None + The target variable to try to predict in the case of + supervised learning. + + groups : array-like of shape (n_samples,), default=None + Labels to constrain permutation within groups, i.e. ``y`` values + are permuted among samples with the same group identifier. + When not specified, ``y`` values are permuted among all samples. + + When a grouped cross-validator is used, the group labels are + also passed on to the ``split`` method of the cross-validator. The + cross-validator uses them for grouping the samples while splitting + the dataset into train/test set. + + cv : int, cross-validation generator or an iterable, default=None + Determines the cross-validation splitting strategy. 
+ Possible inputs for cv are: + + - `None`, to use the default 5-fold cross validation, + - int, to specify the number of folds in a `(Stratified)KFold`, + - :term:`CV splitter`, + - An iterable yielding (train, test) splits as arrays of indices. + + For `int`/`None` inputs, if the estimator is a classifier and `y` is + either binary or multiclass, :class:`StratifiedKFold` is used. In all + other cases, :class:`KFold` is used. These splitters are instantiated + with `shuffle=False` so the splits will be the same across calls. + + Refer :ref:`User Guide ` for the various + cross-validation strategies that can be used here. + + .. versionchanged:: 0.22 + `cv` default value if `None` changed from 3-fold to 5-fold. + + n_permutations : int, default=100 + Number of times to permute ``y``. + + n_jobs : int, default=None + Number of jobs to run in parallel. Training the estimator and computing + the cross-validated score are parallelized over the permutations. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + random_state : int, RandomState instance or None, default=0 + Pass an int for reproducible output for permutation of + ``y`` values among samples. See :term:`Glossary `. + + verbose : int, default=0 + The verbosity level. + + scoring : str or callable, default=None + A single str (see :ref:`scoring_parameter`) or a callable + (see :ref:`scoring`) to evaluate the predictions on the test set. + + If `None` the estimator's score method is used. + + fit_params : dict, default=None + Parameters to pass to the fit method of the estimator. + + .. versionadded:: 0.24 + + Returns + ------- + score : float + The true score without permuting targets. + + permutation_scores : array of shape (n_permutations,) + The scores obtained for each permutations. + + pvalue : float + The p-value, which approximates the probability that the score would + be obtained by chance. This is calculated as: + + `(C + 1) / (n_permutations + 1)` + + Where C is the number of permutations whose score >= the true score. + + The best possible p-value is 1/(n_permutations + 1), the worst is 1.0. + + Notes + ----- + This function implements Test 1 in: + + Ojala and Garriga. `Permutation Tests for Studying Classifier + Performance + `_. The + Journal of Machine Learning Research (2010) vol. 11 + + Examples + -------- + >>> from sklearn.datasets import make_classification + >>> from sklearn.linear_model import LogisticRegression + >>> from sklearn.model_selection import permutation_test_score + >>> X, y = make_classification(random_state=0) + >>> estimator = LogisticRegression() + >>> score, permutation_scores, pvalue = permutation_test_score( + ... estimator, X, y, random_state=0 + ... ) + >>> print(f"Original Score: {score:.3f}") + Original Score: 0.810 + >>> print( + ... f"Permutation Scores: {permutation_scores.mean():.3f} +/- " + ... f"{permutation_scores.std():.3f}" + ... ) + Permutation Scores: 0.505 +/- 0.057 + >>> print(f"P-value: {pvalue:.3f}") + P-value: 0.010 + """ + X, y, groups = indexable(X, y, groups) + + cv = check_cv(cv, y, classifier=is_classifier(estimator)) + scorer = check_scoring(estimator, scoring=scoring) + random_state = check_random_state(random_state) + + # We clone the estimator to make sure that all the folds are + # independent, and that it is pickle-able. 
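+ # The baseline score below is computed on the original targets, while the
+ # permutation scores are computed on shuffled targets; the p-value is then
+ # the documented (C + 1) / (n_permutations + 1), where C is the number of
+ # permutation scores >= the baseline score (see the `pvalue` line further down).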
+ score = _permutation_test_score( + clone(estimator), X, y, groups, cv, scorer, fit_params=fit_params + ) + permutation_scores = Parallel(n_jobs=n_jobs, verbose=verbose)( + delayed(_permutation_test_score)( + clone(estimator), + X, + _shuffle(y, groups, random_state), + groups, + cv, + scorer, + fit_params=fit_params, + ) + for _ in range(n_permutations) + ) + permutation_scores = np.array(permutation_scores) + pvalue = (np.sum(permutation_scores >= score) + 1.0) / (n_permutations + 1) + return score, permutation_scores, pvalue + + +def _permutation_test_score(estimator, X, y, groups, cv, scorer, fit_params): + """Auxiliary function for permutation_test_score""" + # Adjust length of sample weights + fit_params = fit_params if fit_params is not None else {} + avg_score = [] + for train, test in cv.split(X, y, groups): + X_train, y_train = _safe_split(estimator, X, y, train) + X_test, y_test = _safe_split(estimator, X, y, test, train) + fit_params = _check_method_params(X, params=fit_params, indices=train) + estimator.fit(X_train, y_train, **fit_params) + avg_score.append(scorer(estimator, X_test, y_test)) + return np.mean(avg_score) + + +def _shuffle(y, groups, random_state): + """Return a shuffled copy of y eventually shuffle among same groups.""" + if groups is None: + indices = random_state.permutation(len(y)) + else: + indices = np.arange(len(groups)) + for group in np.unique(groups): + this_mask = groups == group + indices[this_mask] = random_state.permutation(indices[this_mask]) + return _safe_indexing(y, indices) + + +@validate_params( + { + "estimator": [HasMethods(["fit"])], + "X": ["array-like", "sparse matrix"], + "y": ["array-like", None], + "groups": ["array-like", None], + "train_sizes": ["array-like"], + "cv": ["cv_object"], + "scoring": [StrOptions(set(get_scorer_names())), callable, None], + "exploit_incremental_learning": ["boolean"], + "n_jobs": [Integral, None], + "pre_dispatch": [Integral, str], + "verbose": ["verbose"], + "shuffle": ["boolean"], + "random_state": ["random_state"], + "error_score": [StrOptions({"raise"}), Real], + "return_times": ["boolean"], + "fit_params": [dict, None], + }, + prefer_skip_nested_validation=False, # estimator is not validated yet +) +def learning_curve( + estimator, + X, + y, + *, + groups=None, + train_sizes=np.linspace(0.1, 1.0, 5), + cv=None, + scoring=None, + exploit_incremental_learning=False, + n_jobs=None, + pre_dispatch="all", + verbose=0, + shuffle=False, + random_state=None, + error_score=np.nan, + return_times=False, + fit_params=None, +): + """Learning curve. + + Determines cross-validated training and test scores for different training + set sizes. + + A cross-validation generator splits the whole dataset k times in training + and test data. Subsets of the training set with varying sizes will be used + to train the estimator and a score for each training subset size and the + test set will be computed. Afterwards, the scores will be averaged over + all k runs for each training subset size. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + estimator : object type that implements the "fit" method + An object of that type which is cloned for each validation. It must + also implement "predict" unless `scoring` is a callable that doesn't + rely on "predict" to compute a score. + + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training vector, where `n_samples` is the number of samples and + `n_features` is the number of features. 
+ + y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None + Target relative to X for classification or regression; + None for unsupervised learning. + + groups : array-like of shape (n_samples,), default=None + Group labels for the samples used while splitting the dataset into + train/test set. Only used in conjunction with a "Group" :term:`cv` + instance (e.g., :class:`GroupKFold`). + + train_sizes : array-like of shape (n_ticks,), \ + default=np.linspace(0.1, 1.0, 5) + Relative or absolute numbers of training examples that will be used to + generate the learning curve. If the dtype is float, it is regarded as a + fraction of the maximum size of the training set (that is determined + by the selected validation method), i.e. it has to be within (0, 1]. + Otherwise it is interpreted as absolute sizes of the training sets. + Note that for classification the number of samples usually have to + be big enough to contain at least one sample from each class. + + cv : int, cross-validation generator or an iterable, default=None + Determines the cross-validation splitting strategy. + Possible inputs for cv are: + + - None, to use the default 5-fold cross validation, + - int, to specify the number of folds in a `(Stratified)KFold`, + - :term:`CV splitter`, + - An iterable yielding (train, test) splits as arrays of indices. + + For int/None inputs, if the estimator is a classifier and ``y`` is + either binary or multiclass, :class:`StratifiedKFold` is used. In all + other cases, :class:`KFold` is used. These splitters are instantiated + with `shuffle=False` so the splits will be the same across calls. + + Refer :ref:`User Guide ` for the various + cross-validation strategies that can be used here. + + .. versionchanged:: 0.22 + ``cv`` default value if None changed from 3-fold to 5-fold. + + scoring : str or callable, default=None + A str (see model evaluation documentation) or + a scorer callable object / function with signature + ``scorer(estimator, X, y)``. + + exploit_incremental_learning : bool, default=False + If the estimator supports incremental learning, this will be + used to speed up fitting for different training set sizes. + + n_jobs : int, default=None + Number of jobs to run in parallel. Training the estimator and computing + the score are parallelized over the different training and test sets. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + pre_dispatch : int or str, default='all' + Number of predispatched jobs for parallel execution (default is + all). The option can reduce the allocated memory. The str can + be an expression like '2*n_jobs'. + + verbose : int, default=0 + Controls the verbosity: the higher, the more messages. + + shuffle : bool, default=False + Whether to shuffle training data before taking prefixes of it + based on``train_sizes``. + + random_state : int, RandomState instance or None, default=None + Used when ``shuffle`` is True. Pass an int for reproducible + output across multiple function calls. + See :term:`Glossary `. + + error_score : 'raise' or numeric, default=np.nan + Value to assign to the score if an error occurs in estimator fitting. + If set to 'raise', the error is raised. + If a numeric value is given, FitFailedWarning is raised. + + .. versionadded:: 0.20 + + return_times : bool, default=False + Whether to return the fit and score times. + + fit_params : dict, default=None + Parameters to pass to the fit method of the estimator. 
+ + .. versionadded:: 0.24 + + Returns + ------- + train_sizes_abs : array of shape (n_unique_ticks,) + Numbers of training examples that has been used to generate the + learning curve. Note that the number of ticks might be less + than n_ticks because duplicate entries will be removed. + + train_scores : array of shape (n_ticks, n_cv_folds) + Scores on training sets. + + test_scores : array of shape (n_ticks, n_cv_folds) + Scores on test set. + + fit_times : array of shape (n_ticks, n_cv_folds) + Times spent for fitting in seconds. Only present if ``return_times`` + is True. + + score_times : array of shape (n_ticks, n_cv_folds) + Times spent for scoring in seconds. Only present if ``return_times`` + is True. + + Examples + -------- + >>> from sklearn.datasets import make_classification + >>> from sklearn.tree import DecisionTreeClassifier + >>> from sklearn.model_selection import learning_curve + >>> X, y = make_classification(n_samples=100, n_features=10, random_state=42) + >>> tree = DecisionTreeClassifier(max_depth=4, random_state=42) + >>> train_size_abs, train_scores, test_scores = learning_curve( + ... tree, X, y, train_sizes=[0.3, 0.6, 0.9] + ... ) + >>> for train_size, cv_train_scores, cv_test_scores in zip( + ... train_size_abs, train_scores, test_scores + ... ): + ... print(f"{train_size} samples were used to train the model") + ... print(f"The average train accuracy is {cv_train_scores.mean():.2f}") + ... print(f"The average test accuracy is {cv_test_scores.mean():.2f}") + 24 samples were used to train the model + The average train accuracy is 1.00 + The average test accuracy is 0.85 + 48 samples were used to train the model + The average train accuracy is 1.00 + The average test accuracy is 0.90 + 72 samples were used to train the model + The average train accuracy is 1.00 + The average test accuracy is 0.93 + """ + if exploit_incremental_learning and not hasattr(estimator, "partial_fit"): + raise ValueError( + "An estimator must support the partial_fit interface " + "to exploit incremental learning" + ) + X, y, groups = indexable(X, y, groups) + + cv = check_cv(cv, y, classifier=is_classifier(estimator)) + # Store it as list as we will be iterating over the list multiple times + cv_iter = list(cv.split(X, y, groups)) + + scorer = check_scoring(estimator, scoring=scoring) + + n_max_training_samples = len(cv_iter[0][0]) + # Because the lengths of folds can be significantly different, it is + # not guaranteed that we use all of the available training data when we + # use the first 'n_max_training_samples' samples. 
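+ # For example (from _translate_train_sizes' docstring below): fractional
+ # sizes are scaled by the size of the largest training fold,
+ # _translate_train_sizes([0.5, 1.0], 10) -> [5, 10], while absolute sizes
+ # such as [5, 10] are only validated against it and deduplicated.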
+ train_sizes_abs = _translate_train_sizes(train_sizes, n_max_training_samples) + n_unique_ticks = train_sizes_abs.shape[0] + if verbose > 0: + print("[learning_curve] Training set sizes: " + str(train_sizes_abs)) + + parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch, verbose=verbose) + + if shuffle: + rng = check_random_state(random_state) + cv_iter = ((rng.permutation(train), test) for train, test in cv_iter) + + if exploit_incremental_learning: + classes = np.unique(y) if is_classifier(estimator) else None + out = parallel( + delayed(_incremental_fit_estimator)( + clone(estimator), + X, + y, + classes, + train, + test, + train_sizes_abs, + scorer, + return_times, + error_score=error_score, + fit_params=fit_params, + ) + for train, test in cv_iter + ) + out = np.asarray(out).transpose((2, 1, 0)) + else: + train_test_proportions = [] + for train, test in cv_iter: + for n_train_samples in train_sizes_abs: + train_test_proportions.append((train[:n_train_samples], test)) + + results = parallel( + delayed(_fit_and_score)( + clone(estimator), + X, + y, + scorer=scorer, + train=train, + test=test, + verbose=verbose, + parameters=None, + fit_params=fit_params, + # TODO(SLEP6): support score params here + score_params=None, + return_train_score=True, + error_score=error_score, + return_times=return_times, + ) + for train, test in train_test_proportions + ) + _warn_or_raise_about_fit_failures(results, error_score) + results = _aggregate_score_dicts(results) + train_scores = results["train_scores"].reshape(-1, n_unique_ticks).T + test_scores = results["test_scores"].reshape(-1, n_unique_ticks).T + out = [train_scores, test_scores] + + if return_times: + fit_times = results["fit_time"].reshape(-1, n_unique_ticks).T + score_times = results["score_time"].reshape(-1, n_unique_ticks).T + out.extend([fit_times, score_times]) + + ret = train_sizes_abs, out[0], out[1] + + if return_times: + ret = ret + (out[2], out[3]) + + return ret + + +def _translate_train_sizes(train_sizes, n_max_training_samples): + """Determine absolute sizes of training subsets and validate 'train_sizes'. + + Examples: + _translate_train_sizes([0.5, 1.0], 10) -> [5, 10] + _translate_train_sizes([5, 10], 10) -> [5, 10] + + Parameters + ---------- + train_sizes : array-like of shape (n_ticks,) + Numbers of training examples that will be used to generate the + learning curve. If the dtype is float, it is regarded as a + fraction of 'n_max_training_samples', i.e. it has to be within (0, 1]. + + n_max_training_samples : int + Maximum number of training samples (upper bound of 'train_sizes'). + + Returns + ------- + train_sizes_abs : array of shape (n_unique_ticks,) + Numbers of training examples that will be used to generate the + learning curve. Note that the number of ticks might be less + than n_ticks because duplicate entries will be removed. + """ + train_sizes_abs = np.asarray(train_sizes) + n_ticks = train_sizes_abs.shape[0] + n_min_required_samples = np.min(train_sizes_abs) + n_max_required_samples = np.max(train_sizes_abs) + if np.issubdtype(train_sizes_abs.dtype, np.floating): + if n_min_required_samples <= 0.0 or n_max_required_samples > 1.0: + raise ValueError( + "train_sizes has been interpreted as fractions " + "of the maximum number of training samples and " + "must be within (0, 1], but is within [%f, %f]." 
+ % (n_min_required_samples, n_max_required_samples) + ) + train_sizes_abs = (train_sizes_abs * n_max_training_samples).astype( + dtype=int, copy=False + ) + train_sizes_abs = np.clip(train_sizes_abs, 1, n_max_training_samples) + else: + if ( + n_min_required_samples <= 0 + or n_max_required_samples > n_max_training_samples + ): + raise ValueError( + "train_sizes has been interpreted as absolute " + "numbers of training samples and must be within " + "(0, %d], but is within [%d, %d]." + % ( + n_max_training_samples, + n_min_required_samples, + n_max_required_samples, + ) + ) + + train_sizes_abs = np.unique(train_sizes_abs) + if n_ticks > train_sizes_abs.shape[0]: + warnings.warn( + "Removed duplicate entries from 'train_sizes'. Number " + "of ticks will be less than the size of " + "'train_sizes': %d instead of %d." % (train_sizes_abs.shape[0], n_ticks), + RuntimeWarning, + ) + + return train_sizes_abs + + +def _incremental_fit_estimator( + estimator, + X, + y, + classes, + train, + test, + train_sizes, + scorer, + return_times, + error_score, + fit_params, +): + """Train estimator on training subsets incrementally and compute scores.""" + train_scores, test_scores, fit_times, score_times = [], [], [], [] + partitions = zip(train_sizes, np.split(train, train_sizes)[:-1]) + if fit_params is None: + fit_params = {} + if classes is None: + partial_fit_func = partial(estimator.partial_fit, **fit_params) + else: + partial_fit_func = partial(estimator.partial_fit, classes=classes, **fit_params) + + for n_train_samples, partial_train in partitions: + train_subset = train[:n_train_samples] + X_train, y_train = _safe_split(estimator, X, y, train_subset) + X_partial_train, y_partial_train = _safe_split(estimator, X, y, partial_train) + X_test, y_test = _safe_split(estimator, X, y, test, train_subset) + start_fit = time.time() + if y_partial_train is None: + partial_fit_func(X_partial_train) + else: + partial_fit_func(X_partial_train, y_partial_train) + fit_time = time.time() - start_fit + fit_times.append(fit_time) + + start_score = time.time() + + # TODO(SLEP6): support score params in the following two calls + test_scores.append( + _score( + estimator, + X_test, + y_test, + scorer, + score_params=None, + error_score=error_score, + ) + ) + train_scores.append( + _score( + estimator, + X_train, + y_train, + scorer, + score_params=None, + error_score=error_score, + ) + ) + score_time = time.time() - start_score + score_times.append(score_time) + + ret = ( + (train_scores, test_scores, fit_times, score_times) + if return_times + else (train_scores, test_scores) + ) + + return np.array(ret).T + + +@validate_params( + { + "estimator": [HasMethods(["fit"])], + "X": ["array-like", "sparse matrix"], + "y": ["array-like", None], + "param_name": [str], + "param_range": ["array-like"], + "groups": ["array-like", None], + "cv": ["cv_object"], + "scoring": [StrOptions(set(get_scorer_names())), callable, None], + "n_jobs": [Integral, None], + "pre_dispatch": [Integral, str], + "verbose": ["verbose"], + "error_score": [StrOptions({"raise"}), Real], + "fit_params": [dict, None], + }, + prefer_skip_nested_validation=False, # estimator is not validated yet +) +def validation_curve( + estimator, + X, + y, + *, + param_name, + param_range, + groups=None, + cv=None, + scoring=None, + n_jobs=None, + pre_dispatch="all", + verbose=0, + error_score=np.nan, + fit_params=None, +): + """Validation curve. + + Determine training and test scores for varying parameter values. 
+ + Compute scores for an estimator with different values of a specified + parameter. This is similar to grid search with one parameter. However, this + will also compute training scores and is merely a utility for plotting the + results. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + estimator : object type that implements the "fit" method + An object of that type which is cloned for each validation. It must + also implement "predict" unless `scoring` is a callable that doesn't + rely on "predict" to compute a score. + + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training vector, where `n_samples` is the number of samples and + `n_features` is the number of features. + + y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None + Target relative to X for classification or regression; + None for unsupervised learning. + + param_name : str + Name of the parameter that will be varied. + + param_range : array-like of shape (n_values,) + The values of the parameter that will be evaluated. + + groups : array-like of shape (n_samples,), default=None + Group labels for the samples used while splitting the dataset into + train/test set. Only used in conjunction with a "Group" :term:`cv` + instance (e.g., :class:`GroupKFold`). + + cv : int, cross-validation generator or an iterable, default=None + Determines the cross-validation splitting strategy. + Possible inputs for cv are: + + - None, to use the default 5-fold cross validation, + - int, to specify the number of folds in a `(Stratified)KFold`, + - :term:`CV splitter`, + - An iterable yielding (train, test) splits as arrays of indices. + + For int/None inputs, if the estimator is a classifier and ``y`` is + either binary or multiclass, :class:`StratifiedKFold` is used. In all + other cases, :class:`KFold` is used. These splitters are instantiated + with `shuffle=False` so the splits will be the same across calls. + + Refer :ref:`User Guide ` for the various + cross-validation strategies that can be used here. + + .. versionchanged:: 0.22 + ``cv`` default value if None changed from 3-fold to 5-fold. + + scoring : str or callable, default=None + A str (see model evaluation documentation) or + a scorer callable object / function with signature + ``scorer(estimator, X, y)``. + + n_jobs : int, default=None + Number of jobs to run in parallel. Training the estimator and computing + the score are parallelized over the combinations of each parameter + value and each cross-validation split. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + pre_dispatch : int or str, default='all' + Number of predispatched jobs for parallel execution (default is + all). The option can reduce the allocated memory. The str can + be an expression like '2*n_jobs'. + + verbose : int, default=0 + Controls the verbosity: the higher, the more messages. + + error_score : 'raise' or numeric, default=np.nan + Value to assign to the score if an error occurs in estimator fitting. + If set to 'raise', the error is raised. + If a numeric value is given, FitFailedWarning is raised. + + .. versionadded:: 0.20 + + fit_params : dict, default=None + Parameters to pass to the fit method of the estimator. + + .. versionadded:: 0.24 + + Returns + ------- + train_scores : array of shape (n_ticks, n_cv_folds) + Scores on training sets. + + test_scores : array of shape (n_ticks, n_cv_folds) + Scores on test set. 
+ + Notes + ----- + See :ref:`sphx_glr_auto_examples_model_selection_plot_validation_curve.py` + + Examples + -------- + >>> import numpy as np + >>> from sklearn.datasets import make_classification + >>> from sklearn.model_selection import validation_curve + >>> from sklearn.linear_model import LogisticRegression + >>> X, y = make_classification(n_samples=1_000, random_state=0) + >>> logistic_regression = LogisticRegression() + >>> param_name, param_range = "C", np.logspace(-8, 3, 10) + >>> train_scores, test_scores = validation_curve( + ... logistic_regression, X, y, param_name=param_name, param_range=param_range + ... ) + >>> print(f"The average train accuracy is {train_scores.mean():.2f}") + The average train accuracy is 0.81 + >>> print(f"The average test accuracy is {test_scores.mean():.2f}") + The average test accuracy is 0.81 + """ + X, y, groups = indexable(X, y, groups) + + cv = check_cv(cv, y, classifier=is_classifier(estimator)) + scorer = check_scoring(estimator, scoring=scoring) + + parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch, verbose=verbose) + results = parallel( + delayed(_fit_and_score)( + clone(estimator), + X, + y, + scorer=scorer, + train=train, + test=test, + verbose=verbose, + parameters={param_name: v}, + fit_params=fit_params, + # TODO(SLEP6): support score params here + score_params=None, + return_train_score=True, + error_score=error_score, + ) + # NOTE do not change order of iteration to allow one time cv splitters + for train, test in cv.split(X, y, groups) + for v in param_range + ) + n_params = len(param_range) + + results = _aggregate_score_dicts(results) + train_scores = results["train_scores"].reshape(-1, n_params).T + test_scores = results["test_scores"].reshape(-1, n_params).T + + return train_scores, test_scores + + +def _aggregate_score_dicts(scores): + """Aggregate the list of dict to dict of np ndarray + + The aggregated output of _aggregate_score_dicts will be a list of dict + of form [{'prec': 0.1, 'acc':1.0}, {'prec': 0.1, 'acc':1.0}, ...] + Convert it to a dict of array {'prec': np.array([0.1 ...]), ...} + + Parameters + ---------- + + scores : list of dict + List of dicts of the scores for all scorers. This is a flat list, + assumed originally to be of row major order. + + Example + ------- + + >>> scores = [{'a': 1, 'b':10}, {'a': 2, 'b':2}, {'a': 3, 'b':3}, + ... 
{'a': 10, 'b': 10}] # doctest: +SKIP + >>> _aggregate_score_dicts(scores) # doctest: +SKIP + {'a': array([1, 2, 3, 10]), + 'b': array([10, 2, 3, 10])} + """ + return { + key: ( + np.asarray([score[key] for score in scores]) + if isinstance(scores[0][key], numbers.Number) + else [score[key] for score in scores] + ) + for key in scores[0] + } diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/tests/__init__.py b/env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/tests/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7e6622d69fcf0dad4a5e82a605b5731852e2a102 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/tests/__pycache__/common.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/tests/__pycache__/common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d0b4ff58b0dc7a909cfe648f02c2f0a7e86d02f6 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/tests/__pycache__/common.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/tests/__pycache__/test_plot.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/tests/__pycache__/test_plot.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d437d860df58a9e37903e8b84800fc489358ef7d Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/tests/__pycache__/test_plot.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/tests/__pycache__/test_search.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/tests/__pycache__/test_search.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fe3000e8ee99fcfc7242da22d090e678374eac3a Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/tests/__pycache__/test_search.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/tests/__pycache__/test_split.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/tests/__pycache__/test_split.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..85ffd79180b0164af75e6c3d74cb015b05792222 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/tests/__pycache__/test_split.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/tests/__pycache__/test_successive_halving.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/tests/__pycache__/test_successive_halving.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f2f9fac1421b8b33a912dfd07187dad9e2c3fd6c Binary files /dev/null and 
b/env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/tests/__pycache__/test_successive_halving.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/tests/__pycache__/test_validation.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/tests/__pycache__/test_validation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..86157632c6cbe2b65f2d35bc7ff838d604faf44b Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/tests/__pycache__/test_validation.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/tests/common.py b/env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/tests/common.py new file mode 100644 index 0000000000000000000000000000000000000000..54a993db76933a5e710f0ddd20a4efd0118ecf95 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/tests/common.py @@ -0,0 +1,24 @@ +""" +Common utilities for testing model selection. +""" + +import numpy as np + +from sklearn.model_selection import KFold + + +class OneTimeSplitter: + """A wrapper to make KFold single entry cv iterator""" + + def __init__(self, n_splits=4, n_samples=99): + self.n_splits = n_splits + self.n_samples = n_samples + self.indices = iter(KFold(n_splits=n_splits).split(np.ones(n_samples))) + + def split(self, X=None, y=None, groups=None): + """Split can be called only once""" + for index in self.indices: + yield index + + def get_n_splits(self, X=None, y=None, groups=None): + return self.n_splits diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/tests/test_plot.py b/env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/tests/test_plot.py new file mode 100644 index 0000000000000000000000000000000000000000..1a7268150fd90ceaa67d18a8455e85941e0c016e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/tests/test_plot.py @@ -0,0 +1,595 @@ +import numpy as np +import pytest + +from sklearn.datasets import load_iris +from sklearn.model_selection import ( + LearningCurveDisplay, + ValidationCurveDisplay, + learning_curve, + validation_curve, +) +from sklearn.tree import DecisionTreeClassifier +from sklearn.utils import shuffle +from sklearn.utils._testing import assert_allclose, assert_array_equal + + +@pytest.fixture +def data(): + return shuffle(*load_iris(return_X_y=True), random_state=0) + + +@pytest.mark.parametrize( + "params, err_type, err_msg", + [ + ({"std_display_style": "invalid"}, ValueError, "Unknown std_display_style:"), + ({"score_type": "invalid"}, ValueError, "Unknown score_type:"), + ], +) +@pytest.mark.parametrize( + "CurveDisplay, specific_params", + [ + (ValidationCurveDisplay, {"param_name": "max_depth", "param_range": [1, 3, 5]}), + (LearningCurveDisplay, {"train_sizes": [0.3, 0.6, 0.9]}), + ], +) +def test_curve_display_parameters_validation( + pyplot, data, params, err_type, err_msg, CurveDisplay, specific_params +): + """Check that we raise a proper error when passing invalid parameters.""" + X, y = data + estimator = DecisionTreeClassifier(random_state=0) + + with pytest.raises(err_type, match=err_msg): + CurveDisplay.from_estimator(estimator, X, y, **specific_params, **params) + + +def test_learning_curve_display_default_usage(pyplot, data): + """Check the default usage of the LearningCurveDisplay class.""" + X, y = data + estimator = DecisionTreeClassifier(random_state=0) + + 
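+ # The attributes stored on the display (train_sizes, train_scores,
+ # test_scores) are compared below against a direct learning_curve call
+ # made with the same train_sizes.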
train_sizes = [0.3, 0.6, 0.9] + display = LearningCurveDisplay.from_estimator( + estimator, X, y, train_sizes=train_sizes + ) + + import matplotlib as mpl + + assert display.errorbar_ is None + + assert isinstance(display.lines_, list) + for line in display.lines_: + assert isinstance(line, mpl.lines.Line2D) + + assert isinstance(display.fill_between_, list) + for fill in display.fill_between_: + assert isinstance(fill, mpl.collections.PolyCollection) + assert fill.get_alpha() == 0.5 + + assert display.score_name == "Score" + assert display.ax_.get_xlabel() == "Number of samples in the training set" + assert display.ax_.get_ylabel() == "Score" + + _, legend_labels = display.ax_.get_legend_handles_labels() + assert legend_labels == ["Train", "Test"] + + train_sizes_abs, train_scores, test_scores = learning_curve( + estimator, X, y, train_sizes=train_sizes + ) + + assert_array_equal(display.train_sizes, train_sizes_abs) + assert_allclose(display.train_scores, train_scores) + assert_allclose(display.test_scores, test_scores) + + +def test_validation_curve_display_default_usage(pyplot, data): + """Check the default usage of the ValidationCurveDisplay class.""" + X, y = data + estimator = DecisionTreeClassifier(random_state=0) + + param_name, param_range = "max_depth", [1, 3, 5] + display = ValidationCurveDisplay.from_estimator( + estimator, X, y, param_name=param_name, param_range=param_range + ) + + import matplotlib as mpl + + assert display.errorbar_ is None + + assert isinstance(display.lines_, list) + for line in display.lines_: + assert isinstance(line, mpl.lines.Line2D) + + assert isinstance(display.fill_between_, list) + for fill in display.fill_between_: + assert isinstance(fill, mpl.collections.PolyCollection) + assert fill.get_alpha() == 0.5 + + assert display.score_name == "Score" + assert display.ax_.get_xlabel() == f"{param_name}" + assert display.ax_.get_ylabel() == "Score" + + _, legend_labels = display.ax_.get_legend_handles_labels() + assert legend_labels == ["Train", "Test"] + + train_scores, test_scores = validation_curve( + estimator, X, y, param_name=param_name, param_range=param_range + ) + + assert_array_equal(display.param_range, param_range) + assert_allclose(display.train_scores, train_scores) + assert_allclose(display.test_scores, test_scores) + + +@pytest.mark.parametrize( + "CurveDisplay, specific_params", + [ + (ValidationCurveDisplay, {"param_name": "max_depth", "param_range": [1, 3, 5]}), + (LearningCurveDisplay, {"train_sizes": [0.3, 0.6, 0.9]}), + ], +) +def test_curve_display_negate_score(pyplot, data, CurveDisplay, specific_params): + """Check the behaviour of the `negate_score` parameter calling `from_estimator` and + `plot`. 
+ """ + X, y = data + estimator = DecisionTreeClassifier(max_depth=1, random_state=0) + + negate_score = False + display = CurveDisplay.from_estimator( + estimator, X, y, **specific_params, negate_score=negate_score + ) + + positive_scores = display.lines_[0].get_data()[1] + assert (positive_scores >= 0).all() + assert display.ax_.get_ylabel() == "Score" + + negate_score = True + display = CurveDisplay.from_estimator( + estimator, X, y, **specific_params, negate_score=negate_score + ) + + negative_scores = display.lines_[0].get_data()[1] + assert (negative_scores <= 0).all() + assert_allclose(negative_scores, -positive_scores) + assert display.ax_.get_ylabel() == "Negative score" + + negate_score = False + display = CurveDisplay.from_estimator( + estimator, X, y, **specific_params, negate_score=negate_score + ) + assert display.ax_.get_ylabel() == "Score" + display.plot(negate_score=not negate_score) + assert display.ax_.get_ylabel() == "Score" + assert (display.lines_[0].get_data()[1] < 0).all() + + +@pytest.mark.parametrize( + "score_name, ylabel", [(None, "Score"), ("Accuracy", "Accuracy")] +) +@pytest.mark.parametrize( + "CurveDisplay, specific_params", + [ + (ValidationCurveDisplay, {"param_name": "max_depth", "param_range": [1, 3, 5]}), + (LearningCurveDisplay, {"train_sizes": [0.3, 0.6, 0.9]}), + ], +) +def test_curve_display_score_name( + pyplot, data, score_name, ylabel, CurveDisplay, specific_params +): + """Check that we can overwrite the default score name shown on the y-axis.""" + X, y = data + estimator = DecisionTreeClassifier(random_state=0) + + display = CurveDisplay.from_estimator( + estimator, X, y, **specific_params, score_name=score_name + ) + + assert display.ax_.get_ylabel() == ylabel + X, y = data + estimator = DecisionTreeClassifier(max_depth=1, random_state=0) + + display = CurveDisplay.from_estimator( + estimator, X, y, **specific_params, score_name=score_name + ) + + assert display.score_name == ylabel + + +@pytest.mark.parametrize("std_display_style", (None, "errorbar")) +def test_learning_curve_display_score_type(pyplot, data, std_display_style): + """Check the behaviour of setting the `score_type` parameter.""" + X, y = data + estimator = DecisionTreeClassifier(random_state=0) + + train_sizes = [0.3, 0.6, 0.9] + train_sizes_abs, train_scores, test_scores = learning_curve( + estimator, X, y, train_sizes=train_sizes + ) + + score_type = "train" + display = LearningCurveDisplay.from_estimator( + estimator, + X, + y, + train_sizes=train_sizes, + score_type=score_type, + std_display_style=std_display_style, + ) + + _, legend_label = display.ax_.get_legend_handles_labels() + assert legend_label == ["Train"] + + if std_display_style is None: + assert len(display.lines_) == 1 + assert display.errorbar_ is None + x_data, y_data = display.lines_[0].get_data() + else: + assert display.lines_ is None + assert len(display.errorbar_) == 1 + x_data, y_data = display.errorbar_[0].lines[0].get_data() + + assert_array_equal(x_data, train_sizes_abs) + assert_allclose(y_data, train_scores.mean(axis=1)) + + score_type = "test" + display = LearningCurveDisplay.from_estimator( + estimator, + X, + y, + train_sizes=train_sizes, + score_type=score_type, + std_display_style=std_display_style, + ) + + _, legend_label = display.ax_.get_legend_handles_labels() + assert legend_label == ["Test"] + + if std_display_style is None: + assert len(display.lines_) == 1 + assert display.errorbar_ is None + x_data, y_data = display.lines_[0].get_data() + else: + assert display.lines_ is None + 
assert len(display.errorbar_) == 1 + x_data, y_data = display.errorbar_[0].lines[0].get_data() + + assert_array_equal(x_data, train_sizes_abs) + assert_allclose(y_data, test_scores.mean(axis=1)) + + score_type = "both" + display = LearningCurveDisplay.from_estimator( + estimator, + X, + y, + train_sizes=train_sizes, + score_type=score_type, + std_display_style=std_display_style, + ) + + _, legend_label = display.ax_.get_legend_handles_labels() + assert legend_label == ["Train", "Test"] + + if std_display_style is None: + assert len(display.lines_) == 2 + assert display.errorbar_ is None + x_data_train, y_data_train = display.lines_[0].get_data() + x_data_test, y_data_test = display.lines_[1].get_data() + else: + assert display.lines_ is None + assert len(display.errorbar_) == 2 + x_data_train, y_data_train = display.errorbar_[0].lines[0].get_data() + x_data_test, y_data_test = display.errorbar_[1].lines[0].get_data() + + assert_array_equal(x_data_train, train_sizes_abs) + assert_allclose(y_data_train, train_scores.mean(axis=1)) + assert_array_equal(x_data_test, train_sizes_abs) + assert_allclose(y_data_test, test_scores.mean(axis=1)) + + +@pytest.mark.parametrize("std_display_style", (None, "errorbar")) +def test_validation_curve_display_score_type(pyplot, data, std_display_style): + """Check the behaviour of setting the `score_type` parameter.""" + X, y = data + estimator = DecisionTreeClassifier(random_state=0) + + param_name, param_range = "max_depth", [1, 3, 5] + train_scores, test_scores = validation_curve( + estimator, X, y, param_name=param_name, param_range=param_range + ) + + score_type = "train" + display = ValidationCurveDisplay.from_estimator( + estimator, + X, + y, + param_name=param_name, + param_range=param_range, + score_type=score_type, + std_display_style=std_display_style, + ) + + _, legend_label = display.ax_.get_legend_handles_labels() + assert legend_label == ["Train"] + + if std_display_style is None: + assert len(display.lines_) == 1 + assert display.errorbar_ is None + x_data, y_data = display.lines_[0].get_data() + else: + assert display.lines_ is None + assert len(display.errorbar_) == 1 + x_data, y_data = display.errorbar_[0].lines[0].get_data() + + assert_array_equal(x_data, param_range) + assert_allclose(y_data, train_scores.mean(axis=1)) + + score_type = "test" + display = ValidationCurveDisplay.from_estimator( + estimator, + X, + y, + param_name=param_name, + param_range=param_range, + score_type=score_type, + std_display_style=std_display_style, + ) + + _, legend_label = display.ax_.get_legend_handles_labels() + assert legend_label == ["Test"] + + if std_display_style is None: + assert len(display.lines_) == 1 + assert display.errorbar_ is None + x_data, y_data = display.lines_[0].get_data() + else: + assert display.lines_ is None + assert len(display.errorbar_) == 1 + x_data, y_data = display.errorbar_[0].lines[0].get_data() + + assert_array_equal(x_data, param_range) + assert_allclose(y_data, test_scores.mean(axis=1)) + + score_type = "both" + display = ValidationCurveDisplay.from_estimator( + estimator, + X, + y, + param_name=param_name, + param_range=param_range, + score_type=score_type, + std_display_style=std_display_style, + ) + + _, legend_label = display.ax_.get_legend_handles_labels() + assert legend_label == ["Train", "Test"] + + if std_display_style is None: + assert len(display.lines_) == 2 + assert display.errorbar_ is None + x_data_train, y_data_train = display.lines_[0].get_data() + x_data_test, y_data_test = display.lines_[1].get_data() + 
else: + assert display.lines_ is None + assert len(display.errorbar_) == 2 + x_data_train, y_data_train = display.errorbar_[0].lines[0].get_data() + x_data_test, y_data_test = display.errorbar_[1].lines[0].get_data() + + assert_array_equal(x_data_train, param_range) + assert_allclose(y_data_train, train_scores.mean(axis=1)) + assert_array_equal(x_data_test, param_range) + assert_allclose(y_data_test, test_scores.mean(axis=1)) + + +@pytest.mark.parametrize( + "CurveDisplay, specific_params, expected_xscale", + [ + ( + ValidationCurveDisplay, + {"param_name": "max_depth", "param_range": np.arange(1, 5)}, + "linear", + ), + (LearningCurveDisplay, {"train_sizes": np.linspace(0.1, 0.9, num=5)}, "linear"), + ( + ValidationCurveDisplay, + { + "param_name": "max_depth", + "param_range": np.round(np.logspace(0, 2, num=5)).astype(np.int64), + }, + "log", + ), + (LearningCurveDisplay, {"train_sizes": np.logspace(-1, 0, num=5)}, "log"), + ], +) +def test_curve_display_xscale_auto( + pyplot, data, CurveDisplay, specific_params, expected_xscale +): + """Check the behaviour of the x-axis scaling depending on the data provided.""" + X, y = data + estimator = DecisionTreeClassifier(random_state=0) + + display = CurveDisplay.from_estimator(estimator, X, y, **specific_params) + assert display.ax_.get_xscale() == expected_xscale + + +@pytest.mark.parametrize( + "CurveDisplay, specific_params", + [ + (ValidationCurveDisplay, {"param_name": "max_depth", "param_range": [1, 3, 5]}), + (LearningCurveDisplay, {"train_sizes": [0.3, 0.6, 0.9]}), + ], +) +def test_curve_display_std_display_style(pyplot, data, CurveDisplay, specific_params): + """Check the behaviour of the parameter `std_display_style`.""" + X, y = data + estimator = DecisionTreeClassifier(random_state=0) + + import matplotlib as mpl + + std_display_style = None + display = CurveDisplay.from_estimator( + estimator, + X, + y, + **specific_params, + std_display_style=std_display_style, + ) + + assert len(display.lines_) == 2 + for line in display.lines_: + assert isinstance(line, mpl.lines.Line2D) + assert display.errorbar_ is None + assert display.fill_between_ is None + _, legend_label = display.ax_.get_legend_handles_labels() + assert len(legend_label) == 2 + + std_display_style = "fill_between" + display = CurveDisplay.from_estimator( + estimator, + X, + y, + **specific_params, + std_display_style=std_display_style, + ) + + assert len(display.lines_) == 2 + for line in display.lines_: + assert isinstance(line, mpl.lines.Line2D) + assert display.errorbar_ is None + assert len(display.fill_between_) == 2 + for fill_between in display.fill_between_: + assert isinstance(fill_between, mpl.collections.PolyCollection) + _, legend_label = display.ax_.get_legend_handles_labels() + assert len(legend_label) == 2 + + std_display_style = "errorbar" + display = CurveDisplay.from_estimator( + estimator, + X, + y, + **specific_params, + std_display_style=std_display_style, + ) + + assert display.lines_ is None + assert len(display.errorbar_) == 2 + for errorbar in display.errorbar_: + assert isinstance(errorbar, mpl.container.ErrorbarContainer) + assert display.fill_between_ is None + _, legend_label = display.ax_.get_legend_handles_labels() + assert len(legend_label) == 2 + + +@pytest.mark.parametrize( + "CurveDisplay, specific_params", + [ + (ValidationCurveDisplay, {"param_name": "max_depth", "param_range": [1, 3, 5]}), + (LearningCurveDisplay, {"train_sizes": [0.3, 0.6, 0.9]}), + ], +) +def test_curve_display_plot_kwargs(pyplot, data, CurveDisplay, 
specific_params): + """Check the behaviour of the different plotting keyword arguments: `line_kw`, + `fill_between_kw`, and `errorbar_kw`.""" + X, y = data + estimator = DecisionTreeClassifier(random_state=0) + + std_display_style = "fill_between" + line_kw = {"color": "red"} + fill_between_kw = {"color": "red", "alpha": 1.0} + display = CurveDisplay.from_estimator( + estimator, + X, + y, + **specific_params, + std_display_style=std_display_style, + line_kw=line_kw, + fill_between_kw=fill_between_kw, + ) + + assert display.lines_[0].get_color() == "red" + assert_allclose( + display.fill_between_[0].get_facecolor(), + [[1.0, 0.0, 0.0, 1.0]], # trust me, it's red + ) + + std_display_style = "errorbar" + errorbar_kw = {"color": "red"} + display = CurveDisplay.from_estimator( + estimator, + X, + y, + **specific_params, + std_display_style=std_display_style, + errorbar_kw=errorbar_kw, + ) + + assert display.errorbar_[0].lines[0].get_color() == "red" + + +# TODO(1.5): to be removed +def test_learning_curve_display_deprecate_log_scale(data, pyplot): + """Check that we warn for the deprecated parameter `log_scale`.""" + X, y = data + estimator = DecisionTreeClassifier(random_state=0) + + with pytest.warns(FutureWarning, match="`log_scale` parameter is deprecated"): + display = LearningCurveDisplay.from_estimator( + estimator, X, y, train_sizes=[0.3, 0.6, 0.9], log_scale=True + ) + + assert display.ax_.get_xscale() == "log" + assert display.ax_.get_yscale() == "linear" + + with pytest.warns(FutureWarning, match="`log_scale` parameter is deprecated"): + display = LearningCurveDisplay.from_estimator( + estimator, X, y, train_sizes=[0.3, 0.6, 0.9], log_scale=False + ) + + assert display.ax_.get_xscale() == "linear" + assert display.ax_.get_yscale() == "linear" + + +@pytest.mark.parametrize( + "param_range, xscale", + [([5, 10, 15], "linear"), ([-50, 5, 50, 500], "symlog"), ([5, 50, 500], "log")], +) +def test_validation_curve_xscale_from_param_range_provided_as_a_list( + pyplot, data, param_range, xscale +): + """Check the induced xscale from the provided param_range values.""" + X, y = data + estimator = DecisionTreeClassifier(random_state=0) + + param_name = "max_depth" + display = ValidationCurveDisplay.from_estimator( + estimator, + X, + y, + param_name=param_name, + param_range=param_range, + ) + + assert display.ax_.get_xscale() == xscale + + +@pytest.mark.parametrize( + "Display, params", + [ + (LearningCurveDisplay, {}), + (ValidationCurveDisplay, {"param_name": "max_depth", "param_range": [1, 3, 5]}), + ], +) +def test_subclassing_displays(pyplot, data, Display, params): + """Check that named constructors return the correct type when subclassed. 
+ + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/pull/27675 + """ + X, y = data + estimator = DecisionTreeClassifier(random_state=0) + + class SubclassOfDisplay(Display): + pass + + display = SubclassOfDisplay.from_estimator(estimator, X, y, **params) + assert isinstance(display, SubclassOfDisplay) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/tests/test_search.py b/env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/tests/test_search.py new file mode 100644 index 0000000000000000000000000000000000000000..c0db76c5c6ef654340742eeaf3f744637a06fd8b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/tests/test_search.py @@ -0,0 +1,2537 @@ +"""Test the search module""" + +import pickle +import re +import sys +from collections.abc import Iterable, Sized +from functools import partial +from io import StringIO +from itertools import chain, product +from types import GeneratorType + +import numpy as np +import pytest +from scipy.stats import bernoulli, expon, uniform + +from sklearn.base import BaseEstimator, ClassifierMixin, is_classifier +from sklearn.cluster import KMeans +from sklearn.datasets import ( + make_blobs, + make_classification, + make_multilabel_classification, +) +from sklearn.ensemble import HistGradientBoostingClassifier +from sklearn.exceptions import FitFailedWarning +from sklearn.experimental import enable_halving_search_cv # noqa +from sklearn.impute import SimpleImputer +from sklearn.linear_model import ( + LinearRegression, + Ridge, + SGDClassifier, +) +from sklearn.metrics import ( + accuracy_score, + confusion_matrix, + f1_score, + make_scorer, + r2_score, + recall_score, + roc_auc_score, +) +from sklearn.metrics.pairwise import euclidean_distances +from sklearn.model_selection import ( + GridSearchCV, + GroupKFold, + GroupShuffleSplit, + HalvingGridSearchCV, + KFold, + LeaveOneGroupOut, + LeavePGroupsOut, + ParameterGrid, + ParameterSampler, + RandomizedSearchCV, + StratifiedKFold, + StratifiedShuffleSplit, + train_test_split, +) +from sklearn.model_selection._search import BaseSearchCV +from sklearn.model_selection.tests.common import OneTimeSplitter +from sklearn.neighbors import KernelDensity, KNeighborsClassifier, LocalOutlierFactor +from sklearn.pipeline import Pipeline +from sklearn.svm import SVC, LinearSVC +from sklearn.tests.metadata_routing_common import ( + ConsumingScorer, + _Registry, + check_recorded_metadata, +) +from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor +from sklearn.utils._mocking import CheckingClassifier, MockDataFrame +from sklearn.utils._testing import ( + MinimalClassifier, + MinimalRegressor, + MinimalTransformer, + assert_allclose, + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, + ignore_warnings, +) +from sklearn.utils.fixes import CSR_CONTAINERS +from sklearn.utils.validation import _num_samples + + +# Neither of the following two estimators inherit from BaseEstimator, +# to test hyperparameter search on user-defined classifiers. 
+class MockClassifier: + """Dummy classifier to test the parameter search algorithms""" + + def __init__(self, foo_param=0): + self.foo_param = foo_param + + def fit(self, X, Y): + assert len(X) == len(Y) + self.classes_ = np.unique(Y) + return self + + def predict(self, T): + return T.shape[0] + + def transform(self, X): + return X + self.foo_param + + def inverse_transform(self, X): + return X - self.foo_param + + predict_proba = predict + predict_log_proba = predict + decision_function = predict + + def score(self, X=None, Y=None): + if self.foo_param > 1: + score = 1.0 + else: + score = 0.0 + return score + + def get_params(self, deep=False): + return {"foo_param": self.foo_param} + + def set_params(self, **params): + self.foo_param = params["foo_param"] + return self + + +class LinearSVCNoScore(LinearSVC): + """A LinearSVC classifier that has no score method.""" + + @property + def score(self): + raise AttributeError + + +X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]]) +y = np.array([1, 1, 2, 2]) + + +def assert_grid_iter_equals_getitem(grid): + assert list(grid) == [grid[i] for i in range(len(grid))] + + +@pytest.mark.parametrize("klass", [ParameterGrid, partial(ParameterSampler, n_iter=10)]) +@pytest.mark.parametrize( + "input, error_type, error_message", + [ + (0, TypeError, r"Parameter .* a dict or a list, got: 0 of type int"), + ([{"foo": [0]}, 0], TypeError, r"Parameter .* is not a dict \(0\)"), + ( + {"foo": 0}, + TypeError, + r"Parameter (grid|distribution) for parameter 'foo' (is not|needs to be) " + r"(a list or a numpy array|iterable or a distribution).*", + ), + ], +) +def test_validate_parameter_input(klass, input, error_type, error_message): + with pytest.raises(error_type, match=error_message): + klass(input) + + +def test_parameter_grid(): + # Test basic properties of ParameterGrid. 
+ params1 = {"foo": [1, 2, 3]} + grid1 = ParameterGrid(params1) + assert isinstance(grid1, Iterable) + assert isinstance(grid1, Sized) + assert len(grid1) == 3 + assert_grid_iter_equals_getitem(grid1) + + params2 = {"foo": [4, 2], "bar": ["ham", "spam", "eggs"]} + grid2 = ParameterGrid(params2) + assert len(grid2) == 6 + + # loop to assert we can iterate over the grid multiple times + for i in range(2): + # tuple + chain transforms {"a": 1, "b": 2} to ("a", 1, "b", 2) + points = set(tuple(chain(*(sorted(p.items())))) for p in grid2) + assert points == set( + ("bar", x, "foo", y) for x, y in product(params2["bar"], params2["foo"]) + ) + assert_grid_iter_equals_getitem(grid2) + + # Special case: empty grid (useful to get default estimator settings) + empty = ParameterGrid({}) + assert len(empty) == 1 + assert list(empty) == [{}] + assert_grid_iter_equals_getitem(empty) + with pytest.raises(IndexError): + empty[1] + + has_empty = ParameterGrid([{"C": [1, 10]}, {}, {"C": [0.5]}]) + assert len(has_empty) == 4 + assert list(has_empty) == [{"C": 1}, {"C": 10}, {}, {"C": 0.5}] + assert_grid_iter_equals_getitem(has_empty) + + +def test_grid_search(): + # Test that the best estimator contains the right value for foo_param + clf = MockClassifier() + grid_search = GridSearchCV(clf, {"foo_param": [1, 2, 3]}, cv=3, verbose=3) + # make sure it selects the smallest parameter in case of ties + old_stdout = sys.stdout + sys.stdout = StringIO() + grid_search.fit(X, y) + sys.stdout = old_stdout + assert grid_search.best_estimator_.foo_param == 2 + + assert_array_equal(grid_search.cv_results_["param_foo_param"].data, [1, 2, 3]) + + # Smoke test the score etc: + grid_search.score(X, y) + grid_search.predict_proba(X) + grid_search.decision_function(X) + grid_search.transform(X) + + # Test exception handling on scoring + grid_search.scoring = "sklearn" + with pytest.raises(ValueError): + grid_search.fit(X, y) + + +def test_grid_search_pipeline_steps(): + # check that parameters that are estimators are cloned before fitting + pipe = Pipeline([("regressor", LinearRegression())]) + param_grid = {"regressor": [LinearRegression(), Ridge()]} + grid_search = GridSearchCV(pipe, param_grid, cv=2) + grid_search.fit(X, y) + regressor_results = grid_search.cv_results_["param_regressor"] + assert isinstance(regressor_results[0], LinearRegression) + assert isinstance(regressor_results[1], Ridge) + assert not hasattr(regressor_results[0], "coef_") + assert not hasattr(regressor_results[1], "coef_") + assert regressor_results[0] is not grid_search.best_estimator_ + assert regressor_results[1] is not grid_search.best_estimator_ + # check that we didn't modify the parameter grid that was passed + assert not hasattr(param_grid["regressor"][0], "coef_") + assert not hasattr(param_grid["regressor"][1], "coef_") + + +@pytest.mark.parametrize("SearchCV", [GridSearchCV, RandomizedSearchCV]) +def test_SearchCV_with_fit_params(SearchCV): + X = np.arange(100).reshape(10, 10) + y = np.array([0] * 5 + [1] * 5) + clf = CheckingClassifier(expected_fit_params=["spam", "eggs"]) + searcher = SearchCV(clf, {"foo_param": [1, 2, 3]}, cv=2, error_score="raise") + + # The CheckingClassifier generates an assertion error if + # a parameter is missing or has length != len(X). + err_msg = r"Expected fit parameter\(s\) \['eggs'\] not seen." 
+ with pytest.raises(AssertionError, match=err_msg): + searcher.fit(X, y, spam=np.ones(10)) + + err_msg = "Fit parameter spam has length 1; expected" + with pytest.raises(AssertionError, match=err_msg): + searcher.fit(X, y, spam=np.ones(1), eggs=np.zeros(10)) + searcher.fit(X, y, spam=np.ones(10), eggs=np.zeros(10)) + + +@ignore_warnings +def test_grid_search_no_score(): + # Test grid-search on classifier that has no score function. + clf = LinearSVC(dual="auto", random_state=0) + X, y = make_blobs(random_state=0, centers=2) + Cs = [0.1, 1, 10] + clf_no_score = LinearSVCNoScore(dual="auto", random_state=0) + grid_search = GridSearchCV(clf, {"C": Cs}, scoring="accuracy") + grid_search.fit(X, y) + + grid_search_no_score = GridSearchCV(clf_no_score, {"C": Cs}, scoring="accuracy") + # smoketest grid search + grid_search_no_score.fit(X, y) + + # check that best params are equal + assert grid_search_no_score.best_params_ == grid_search.best_params_ + # check that we can call score and that it gives the correct result + assert grid_search.score(X, y) == grid_search_no_score.score(X, y) + + # giving no scoring function raises an error + grid_search_no_score = GridSearchCV(clf_no_score, {"C": Cs}) + with pytest.raises(TypeError, match="no scoring"): + grid_search_no_score.fit([[1]]) + + +def test_grid_search_score_method(): + X, y = make_classification(n_samples=100, n_classes=2, flip_y=0.2, random_state=0) + clf = LinearSVC(dual="auto", random_state=0) + grid = {"C": [0.1]} + + search_no_scoring = GridSearchCV(clf, grid, scoring=None).fit(X, y) + search_accuracy = GridSearchCV(clf, grid, scoring="accuracy").fit(X, y) + search_no_score_method_auc = GridSearchCV( + LinearSVCNoScore(dual="auto"), grid, scoring="roc_auc" + ).fit(X, y) + search_auc = GridSearchCV(clf, grid, scoring="roc_auc").fit(X, y) + + # Check warning only occurs in situation where behavior changed: + # estimator requires score method to compete with scoring parameter + score_no_scoring = search_no_scoring.score(X, y) + score_accuracy = search_accuracy.score(X, y) + score_no_score_auc = search_no_score_method_auc.score(X, y) + score_auc = search_auc.score(X, y) + + # ensure the test is sane + assert score_auc < 1.0 + assert score_accuracy < 1.0 + assert score_auc != score_accuracy + + assert_almost_equal(score_accuracy, score_no_scoring) + assert_almost_equal(score_auc, score_no_score_auc) + + +def test_grid_search_groups(): + # Check if ValueError (when groups is None) propagates to GridSearchCV + # And also check if groups is correctly passed to the cv object + rng = np.random.RandomState(0) + + X, y = make_classification(n_samples=15, n_classes=2, random_state=0) + groups = rng.randint(0, 3, 15) + + clf = LinearSVC(dual="auto", random_state=0) + grid = {"C": [1]} + + group_cvs = [ + LeaveOneGroupOut(), + LeavePGroupsOut(2), + GroupKFold(n_splits=3), + GroupShuffleSplit(), + ] + error_msg = "The 'groups' parameter should not be None." 
+ for cv in group_cvs: + gs = GridSearchCV(clf, grid, cv=cv) + with pytest.raises(ValueError, match=error_msg): + gs.fit(X, y) + gs.fit(X, y, groups=groups) + + non_group_cvs = [StratifiedKFold(), StratifiedShuffleSplit()] + for cv in non_group_cvs: + gs = GridSearchCV(clf, grid, cv=cv) + # Should not raise an error + gs.fit(X, y) + + +def test_classes__property(): + # Test that classes_ property matches best_estimator_.classes_ + X = np.arange(100).reshape(10, 10) + y = np.array([0] * 5 + [1] * 5) + Cs = [0.1, 1, 10] + + grid_search = GridSearchCV(LinearSVC(dual="auto", random_state=0), {"C": Cs}) + grid_search.fit(X, y) + assert_array_equal(grid_search.best_estimator_.classes_, grid_search.classes_) + + # Test that regressors do not have a classes_ attribute + grid_search = GridSearchCV(Ridge(), {"alpha": [1.0, 2.0]}) + grid_search.fit(X, y) + assert not hasattr(grid_search, "classes_") + + # Test that the grid searcher has no classes_ attribute before it's fit + grid_search = GridSearchCV(LinearSVC(dual="auto", random_state=0), {"C": Cs}) + assert not hasattr(grid_search, "classes_") + + # Test that the grid searcher has no classes_ attribute without a refit + grid_search = GridSearchCV( + LinearSVC(dual="auto", random_state=0), {"C": Cs}, refit=False + ) + grid_search.fit(X, y) + assert not hasattr(grid_search, "classes_") + + +def test_trivial_cv_results_attr(): + # Test search over a "grid" with only one point. + clf = MockClassifier() + grid_search = GridSearchCV(clf, {"foo_param": [1]}, cv=3) + grid_search.fit(X, y) + assert hasattr(grid_search, "cv_results_") + + random_search = RandomizedSearchCV(clf, {"foo_param": [0]}, n_iter=1, cv=3) + random_search.fit(X, y) + assert hasattr(grid_search, "cv_results_") + + +def test_no_refit(): + # Test that GSCV can be used for model selection alone without refitting + clf = MockClassifier() + for scoring in [None, ["accuracy", "precision"]]: + grid_search = GridSearchCV(clf, {"foo_param": [1, 2, 3]}, refit=False, cv=3) + grid_search.fit(X, y) + assert ( + not hasattr(grid_search, "best_estimator_") + and hasattr(grid_search, "best_index_") + and hasattr(grid_search, "best_params_") + ) + + # Make sure the functions predict/transform etc. raise meaningful + # error messages + for fn_name in ( + "predict", + "predict_proba", + "predict_log_proba", + "transform", + "inverse_transform", + ): + outer_msg = f"has no attribute '{fn_name}'" + inner_msg = ( + f"`refit=False`. 
{fn_name} is available only after " + "refitting on the best parameters" + ) + with pytest.raises(AttributeError, match=outer_msg) as exec_info: + getattr(grid_search, fn_name)(X) + + assert isinstance(exec_info.value.__cause__, AttributeError) + assert inner_msg in str(exec_info.value.__cause__) + + # Test that an invalid refit param raises appropriate error messages + error_msg = ( + "For multi-metric scoring, the parameter refit must be set to a scorer key" + ) + for refit in [True, "recall", "accuracy"]: + with pytest.raises(ValueError, match=error_msg): + GridSearchCV( + clf, {}, refit=refit, scoring={"acc": "accuracy", "prec": "precision"} + ).fit(X, y) + + +def test_grid_search_error(): + # Test that grid search will capture errors on data with different length + X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0) + + clf = LinearSVC(dual="auto") + cv = GridSearchCV(clf, {"C": [0.1, 1.0]}) + with pytest.raises(ValueError): + cv.fit(X_[:180], y_) + + +def test_grid_search_one_grid_point(): + X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0) + param_dict = {"C": [1.0], "kernel": ["rbf"], "gamma": [0.1]} + + clf = SVC(gamma="auto") + cv = GridSearchCV(clf, param_dict) + cv.fit(X_, y_) + + clf = SVC(C=1.0, kernel="rbf", gamma=0.1) + clf.fit(X_, y_) + + assert_array_equal(clf.dual_coef_, cv.best_estimator_.dual_coef_) + + +def test_grid_search_when_param_grid_includes_range(): + # Test that the best estimator contains the right value for foo_param + clf = MockClassifier() + grid_search = None + grid_search = GridSearchCV(clf, {"foo_param": range(1, 4)}, cv=3) + grid_search.fit(X, y) + assert grid_search.best_estimator_.foo_param == 2 + + +def test_grid_search_bad_param_grid(): + X, y = make_classification(n_samples=10, n_features=5, random_state=0) + param_dict = {"C": 1} + clf = SVC(gamma="auto") + error_msg = re.escape( + "Parameter grid for parameter 'C' needs to be a list or " + "a numpy array, but got 1 (of type int) instead. Single " + "values need to be wrapped in a list with one element." + ) + search = GridSearchCV(clf, param_dict) + with pytest.raises(TypeError, match=error_msg): + search.fit(X, y) + + param_dict = {"C": []} + clf = SVC() + error_msg = re.escape( + "Parameter grid for parameter 'C' need to be a non-empty sequence, got: []" + ) + search = GridSearchCV(clf, param_dict) + with pytest.raises(ValueError, match=error_msg): + search.fit(X, y) + + param_dict = {"C": "1,2,3"} + clf = SVC(gamma="auto") + error_msg = re.escape( + "Parameter grid for parameter 'C' needs to be a list or a numpy array, " + "but got '1,2,3' (of type str) instead. Single values need to be " + "wrapped in a list with one element." 
+ ) + search = GridSearchCV(clf, param_dict) + with pytest.raises(TypeError, match=error_msg): + search.fit(X, y) + + param_dict = {"C": np.ones((3, 2))} + clf = SVC() + search = GridSearchCV(clf, param_dict) + with pytest.raises(ValueError): + search.fit(X, y) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_grid_search_sparse(csr_container): + # Test that grid search works with both dense and sparse matrices + X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0) + + clf = LinearSVC(dual="auto") + cv = GridSearchCV(clf, {"C": [0.1, 1.0]}) + cv.fit(X_[:180], y_[:180]) + y_pred = cv.predict(X_[180:]) + C = cv.best_estimator_.C + + X_ = csr_container(X_) + clf = LinearSVC(dual="auto") + cv = GridSearchCV(clf, {"C": [0.1, 1.0]}) + cv.fit(X_[:180].tocoo(), y_[:180]) + y_pred2 = cv.predict(X_[180:]) + C2 = cv.best_estimator_.C + + assert np.mean(y_pred == y_pred2) >= 0.9 + assert C == C2 + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_grid_search_sparse_scoring(csr_container): + X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0) + + clf = LinearSVC(dual="auto") + cv = GridSearchCV(clf, {"C": [0.1, 1.0]}, scoring="f1") + cv.fit(X_[:180], y_[:180]) + y_pred = cv.predict(X_[180:]) + C = cv.best_estimator_.C + + X_ = csr_container(X_) + clf = LinearSVC(dual="auto") + cv = GridSearchCV(clf, {"C": [0.1, 1.0]}, scoring="f1") + cv.fit(X_[:180], y_[:180]) + y_pred2 = cv.predict(X_[180:]) + C2 = cv.best_estimator_.C + + assert_array_equal(y_pred, y_pred2) + assert C == C2 + # Smoke test the score + # np.testing.assert_allclose(f1_score(cv.predict(X_[:180]), y[:180]), + # cv.score(X_[:180], y[:180])) + + # test loss where greater is worse + def f1_loss(y_true_, y_pred_): + return -f1_score(y_true_, y_pred_) + + F1Loss = make_scorer(f1_loss, greater_is_better=False) + cv = GridSearchCV(clf, {"C": [0.1, 1.0]}, scoring=F1Loss) + cv.fit(X_[:180], y_[:180]) + y_pred3 = cv.predict(X_[180:]) + C3 = cv.best_estimator_.C + + assert C == C3 + assert_array_equal(y_pred, y_pred3) + + +def test_grid_search_precomputed_kernel(): + # Test that grid search works when the input features are given in the + # form of a precomputed kernel matrix + X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0) + + # compute the training kernel matrix corresponding to the linear kernel + K_train = np.dot(X_[:180], X_[:180].T) + y_train = y_[:180] + + clf = SVC(kernel="precomputed") + cv = GridSearchCV(clf, {"C": [0.1, 1.0]}) + cv.fit(K_train, y_train) + + assert cv.best_score_ >= 0 + + # compute the test kernel matrix + K_test = np.dot(X_[180:], X_[:180].T) + y_test = y_[180:] + + y_pred = cv.predict(K_test) + + assert np.mean(y_pred == y_test) >= 0 + + # test error is raised when the precomputed kernel is not array-like + # or sparse + with pytest.raises(ValueError): + cv.fit(K_train.tolist(), y_train) + + +def test_grid_search_precomputed_kernel_error_nonsquare(): + # Test that grid search returns an error with a non-square precomputed + # training kernel matrix + K_train = np.zeros((10, 20)) + y_train = np.ones((10,)) + clf = SVC(kernel="precomputed") + cv = GridSearchCV(clf, {"C": [0.1, 1.0]}) + with pytest.raises(ValueError): + cv.fit(K_train, y_train) + + +class BrokenClassifier(BaseEstimator): + """Broken classifier that cannot be fit twice""" + + def __init__(self, parameter=None): + self.parameter = parameter + + def fit(self, X, y): + assert not hasattr(self, "has_been_fit_") + self.has_been_fit_ = True + + 
def predict(self, X): + return np.zeros(X.shape[0]) + + +@ignore_warnings +def test_refit(): + # Regression test for bug in refitting + # Simulates re-fitting a broken estimator; this used to break with + # sparse SVMs. + X = np.arange(100).reshape(10, 10) + y = np.array([0] * 5 + [1] * 5) + + clf = GridSearchCV( + BrokenClassifier(), [{"parameter": [0, 1]}], scoring="precision", refit=True + ) + clf.fit(X, y) + + +def test_refit_callable(): + """ + Test refit=callable, which adds flexibility in identifying the + "best" estimator. + """ + + def refit_callable(cv_results): + """ + A dummy function tests `refit=callable` interface. + Return the index of a model that has the least + `mean_test_score`. + """ + # Fit a dummy clf with `refit=True` to get a list of keys in + # clf.cv_results_. + X, y = make_classification(n_samples=100, n_features=4, random_state=42) + clf = GridSearchCV( + LinearSVC(dual="auto", random_state=42), + {"C": [0.01, 0.1, 1]}, + scoring="precision", + refit=True, + ) + clf.fit(X, y) + # Ensure that `best_index_ != 0` for this dummy clf + assert clf.best_index_ != 0 + + # Assert every key matches those in `cv_results` + for key in clf.cv_results_.keys(): + assert key in cv_results + + return cv_results["mean_test_score"].argmin() + + X, y = make_classification(n_samples=100, n_features=4, random_state=42) + clf = GridSearchCV( + LinearSVC(dual="auto", random_state=42), + {"C": [0.01, 0.1, 1]}, + scoring="precision", + refit=refit_callable, + ) + clf.fit(X, y) + + assert clf.best_index_ == 0 + # Ensure `best_score_` is disabled when using `refit=callable` + assert not hasattr(clf, "best_score_") + + +def test_refit_callable_invalid_type(): + """ + Test implementation catches the errors when 'best_index_' returns an + invalid result. + """ + + def refit_callable_invalid_type(cv_results): + """ + A dummy function tests when returned 'best_index_' is not integer. + """ + return None + + X, y = make_classification(n_samples=100, n_features=4, random_state=42) + + clf = GridSearchCV( + LinearSVC(dual="auto", random_state=42), + {"C": [0.1, 1]}, + scoring="precision", + refit=refit_callable_invalid_type, + ) + with pytest.raises(TypeError, match="best_index_ returned is not an integer"): + clf.fit(X, y) + + +@pytest.mark.parametrize("out_bound_value", [-1, 2]) +@pytest.mark.parametrize("search_cv", [RandomizedSearchCV, GridSearchCV]) +def test_refit_callable_out_bound(out_bound_value, search_cv): + """ + Test implementation catches the errors when 'best_index_' returns an + out of bound result. + """ + + def refit_callable_out_bound(cv_results): + """ + A dummy function tests when returned 'best_index_' is out of bounds. + """ + return out_bound_value + + X, y = make_classification(n_samples=100, n_features=4, random_state=42) + + clf = search_cv( + LinearSVC(dual="auto", random_state=42), + {"C": [0.1, 1]}, + scoring="precision", + refit=refit_callable_out_bound, + ) + with pytest.raises(IndexError, match="best_index_ index out of range"): + clf.fit(X, y) + + +def test_refit_callable_multi_metric(): + """ + Test refit=callable in multiple metric evaluation setting + """ + + def refit_callable(cv_results): + """ + A dummy function tests `refit=callable` interface. + Return the index of a model that has the least + `mean_test_prec`. 
+ """ + assert "mean_test_prec" in cv_results + return cv_results["mean_test_prec"].argmin() + + X, y = make_classification(n_samples=100, n_features=4, random_state=42) + scoring = {"Accuracy": make_scorer(accuracy_score), "prec": "precision"} + clf = GridSearchCV( + LinearSVC(dual="auto", random_state=42), + {"C": [0.01, 0.1, 1]}, + scoring=scoring, + refit=refit_callable, + ) + clf.fit(X, y) + + assert clf.best_index_ == 0 + # Ensure `best_score_` is disabled when using `refit=callable` + assert not hasattr(clf, "best_score_") + + +def test_gridsearch_nd(): + # Pass X as list in GridSearchCV + X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2) + y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11) + + def check_X(x): + return x.shape[1:] == (5, 3, 2) + + def check_y(x): + return x.shape[1:] == (7, 11) + + clf = CheckingClassifier( + check_X=check_X, + check_y=check_y, + methods_to_check=["fit"], + ) + grid_search = GridSearchCV(clf, {"foo_param": [1, 2, 3]}) + grid_search.fit(X_4d, y_3d).score(X, y) + assert hasattr(grid_search, "cv_results_") + + +def test_X_as_list(): + # Pass X as list in GridSearchCV + X = np.arange(100).reshape(10, 10) + y = np.array([0] * 5 + [1] * 5) + + clf = CheckingClassifier( + check_X=lambda x: isinstance(x, list), + methods_to_check=["fit"], + ) + cv = KFold(n_splits=3) + grid_search = GridSearchCV(clf, {"foo_param": [1, 2, 3]}, cv=cv) + grid_search.fit(X.tolist(), y).score(X, y) + assert hasattr(grid_search, "cv_results_") + + +def test_y_as_list(): + # Pass y as list in GridSearchCV + X = np.arange(100).reshape(10, 10) + y = np.array([0] * 5 + [1] * 5) + + clf = CheckingClassifier( + check_y=lambda x: isinstance(x, list), + methods_to_check=["fit"], + ) + cv = KFold(n_splits=3) + grid_search = GridSearchCV(clf, {"foo_param": [1, 2, 3]}, cv=cv) + grid_search.fit(X, y.tolist()).score(X, y) + assert hasattr(grid_search, "cv_results_") + + +@ignore_warnings +def test_pandas_input(): + # check cross_val_score doesn't destroy pandas dataframe + types = [(MockDataFrame, MockDataFrame)] + try: + from pandas import DataFrame, Series + + types.append((DataFrame, Series)) + except ImportError: + pass + + X = np.arange(100).reshape(10, 10) + y = np.array([0] * 5 + [1] * 5) + + for InputFeatureType, TargetType in types: + # X dataframe, y series + X_df, y_ser = InputFeatureType(X), TargetType(y) + + def check_df(x): + return isinstance(x, InputFeatureType) + + def check_series(x): + return isinstance(x, TargetType) + + clf = CheckingClassifier(check_X=check_df, check_y=check_series) + + grid_search = GridSearchCV(clf, {"foo_param": [1, 2, 3]}) + grid_search.fit(X_df, y_ser).score(X_df, y_ser) + grid_search.predict(X_df) + assert hasattr(grid_search, "cv_results_") + + +def test_unsupervised_grid_search(): + # test grid-search with unsupervised estimator + X, y = make_blobs(n_samples=50, random_state=0) + km = KMeans(random_state=0, init="random", n_init=1) + + # Multi-metric evaluation unsupervised + scoring = ["adjusted_rand_score", "fowlkes_mallows_score"] + for refit in ["adjusted_rand_score", "fowlkes_mallows_score"]: + grid_search = GridSearchCV( + km, param_grid=dict(n_clusters=[2, 3, 4]), scoring=scoring, refit=refit + ) + grid_search.fit(X, y) + # Both ARI and FMS can find the right number :) + assert grid_search.best_params_["n_clusters"] == 3 + + # Single metric evaluation unsupervised + grid_search = GridSearchCV( + km, param_grid=dict(n_clusters=[2, 3, 4]), scoring="fowlkes_mallows_score" + ) + grid_search.fit(X, y) + assert 
grid_search.best_params_["n_clusters"] == 3 + + # Now without a score, and without y + grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4])) + grid_search.fit(X) + assert grid_search.best_params_["n_clusters"] == 4 + + +def test_gridsearch_no_predict(): + # test grid-search with an estimator without predict. + # slight duplication of a test from KDE + def custom_scoring(estimator, X): + return 42 if estimator.bandwidth == 0.1 else 0 + + X, _ = make_blobs(cluster_std=0.1, random_state=1, centers=[[0, 1], [1, 0], [0, 0]]) + search = GridSearchCV( + KernelDensity(), + param_grid=dict(bandwidth=[0.01, 0.1, 1]), + scoring=custom_scoring, + ) + search.fit(X) + assert search.best_params_["bandwidth"] == 0.1 + assert search.best_score_ == 42 + + +def test_param_sampler(): + # test basic properties of param sampler + param_distributions = {"kernel": ["rbf", "linear"], "C": uniform(0, 1)} + sampler = ParameterSampler( + param_distributions=param_distributions, n_iter=10, random_state=0 + ) + samples = [x for x in sampler] + assert len(samples) == 10 + for sample in samples: + assert sample["kernel"] in ["rbf", "linear"] + assert 0 <= sample["C"] <= 1 + + # test that repeated calls yield identical parameters + param_distributions = {"C": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]} + sampler = ParameterSampler( + param_distributions=param_distributions, n_iter=3, random_state=0 + ) + assert [x for x in sampler] == [x for x in sampler] + + param_distributions = {"C": uniform(0, 1)} + sampler = ParameterSampler( + param_distributions=param_distributions, n_iter=10, random_state=0 + ) + assert [x for x in sampler] == [x for x in sampler] + + +def check_cv_results_array_types(search, param_keys, score_keys): + # Check if the search `cv_results`'s array are of correct types + cv_results = search.cv_results_ + assert all(isinstance(cv_results[param], np.ma.MaskedArray) for param in param_keys) + assert all(cv_results[key].dtype == object for key in param_keys) + assert not any(isinstance(cv_results[key], np.ma.MaskedArray) for key in score_keys) + assert all( + cv_results[key].dtype == np.float64 + for key in score_keys + if not key.startswith("rank") + ) + + scorer_keys = search.scorer_.keys() if search.multimetric_ else ["score"] + + for key in scorer_keys: + assert cv_results["rank_test_%s" % key].dtype == np.int32 + + +def check_cv_results_keys(cv_results, param_keys, score_keys, n_cand, extra_keys=()): + # Test the search.cv_results_ contains all the required results + all_keys = param_keys + score_keys + extra_keys + assert_array_equal(sorted(cv_results.keys()), sorted(all_keys + ("params",))) + assert all(cv_results[key].shape == (n_cand,) for key in param_keys + score_keys) + + +def test_grid_search_cv_results(): + X, y = make_classification(n_samples=50, n_features=4, random_state=42) + + n_grid_points = 6 + params = [ + dict( + kernel=[ + "rbf", + ], + C=[1, 10], + gamma=[0.1, 1], + ), + dict( + kernel=[ + "poly", + ], + degree=[1, 2], + ), + ] + + param_keys = ("param_C", "param_degree", "param_gamma", "param_kernel") + score_keys = ( + "mean_test_score", + "mean_train_score", + "rank_test_score", + "split0_test_score", + "split1_test_score", + "split2_test_score", + "split0_train_score", + "split1_train_score", + "split2_train_score", + "std_test_score", + "std_train_score", + "mean_fit_time", + "std_fit_time", + "mean_score_time", + "std_score_time", + ) + n_candidates = n_grid_points + + search = GridSearchCV(SVC(), cv=3, param_grid=params, return_train_score=True) + search.fit(X, y) + 
cv_results = search.cv_results_ + # Check if score and timing are reasonable + assert all(cv_results["rank_test_score"] >= 1) + assert all( + np.all(cv_results[k] >= 0) for k in score_keys if k != "rank_test_score" + ) + assert all( + np.all(cv_results[k] <= 1) + for k in score_keys + if "time" not in k and k != "rank_test_score" + ) + # Check cv_results structure + check_cv_results_array_types(search, param_keys, score_keys) + check_cv_results_keys(cv_results, param_keys, score_keys, n_candidates) + # Check masking + cv_results = search.cv_results_ + + poly_results = [ + ( + cv_results["param_C"].mask[i] + and cv_results["param_gamma"].mask[i] + and not cv_results["param_degree"].mask[i] + ) + for i in range(n_candidates) + if cv_results["param_kernel"][i] == "poly" + ] + assert all(poly_results) + assert len(poly_results) == 2 + + rbf_results = [ + ( + not cv_results["param_C"].mask[i] + and not cv_results["param_gamma"].mask[i] + and cv_results["param_degree"].mask[i] + ) + for i in range(n_candidates) + if cv_results["param_kernel"][i] == "rbf" + ] + assert all(rbf_results) + assert len(rbf_results) == 4 + + +def test_random_search_cv_results(): + X, y = make_classification(n_samples=50, n_features=4, random_state=42) + + n_search_iter = 30 + + params = [ + {"kernel": ["rbf"], "C": expon(scale=10), "gamma": expon(scale=0.1)}, + {"kernel": ["poly"], "degree": [2, 3]}, + ] + param_keys = ("param_C", "param_degree", "param_gamma", "param_kernel") + score_keys = ( + "mean_test_score", + "mean_train_score", + "rank_test_score", + "split0_test_score", + "split1_test_score", + "split2_test_score", + "split0_train_score", + "split1_train_score", + "split2_train_score", + "std_test_score", + "std_train_score", + "mean_fit_time", + "std_fit_time", + "mean_score_time", + "std_score_time", + ) + n_candidates = n_search_iter + + search = RandomizedSearchCV( + SVC(), + n_iter=n_search_iter, + cv=3, + param_distributions=params, + return_train_score=True, + ) + search.fit(X, y) + cv_results = search.cv_results_ + # Check results structure + check_cv_results_array_types(search, param_keys, score_keys) + check_cv_results_keys(cv_results, param_keys, score_keys, n_candidates) + assert all( + ( + cv_results["param_C"].mask[i] + and cv_results["param_gamma"].mask[i] + and not cv_results["param_degree"].mask[i] + ) + for i in range(n_candidates) + if cv_results["param_kernel"][i] == "poly" + ) + assert all( + ( + not cv_results["param_C"].mask[i] + and not cv_results["param_gamma"].mask[i] + and cv_results["param_degree"].mask[i] + ) + for i in range(n_candidates) + if cv_results["param_kernel"][i] == "rbf" + ) + + +@pytest.mark.parametrize( + "SearchCV, specialized_params", + [ + (GridSearchCV, {"param_grid": {"C": [1, 10]}}), + (RandomizedSearchCV, {"param_distributions": {"C": [1, 10]}, "n_iter": 2}), + ], +) +def test_search_default_iid(SearchCV, specialized_params): + # TODO: the test name refers to the removed `iid` parameter; what is + # actually checked below is that fold scores are aggregated with an + # unweighted mean/std. + # noise-free simple 2d-data + X, y = make_blobs( + centers=[[0, 0], [1, 0], [0, 1], [1, 1]], + random_state=0, + cluster_std=0.1, + shuffle=False, + n_samples=80, + ) + # split dataset into two folds that are not iid + # first one contains data of all 4 blobs, second only from two.
+ mask = np.ones(X.shape[0], dtype=bool) + mask[np.where(y == 1)[0][::2]] = 0 + mask[np.where(y == 2)[0][::2]] = 0 + # this leads to perfect classification on one fold and a score of 1/3 on + # the other + # create "cv" for splits + cv = [[mask, ~mask], [~mask, mask]] + + common_params = {"estimator": SVC(), "cv": cv, "return_train_score": True} + search = SearchCV(**common_params, **specialized_params) + search.fit(X, y) + + test_cv_scores = np.array( + [ + search.cv_results_["split%d_test_score" % s][0] + for s in range(search.n_splits_) + ] + ) + test_mean = search.cv_results_["mean_test_score"][0] + test_std = search.cv_results_["std_test_score"][0] + + train_cv_scores = np.array( + [ + search.cv_results_["split%d_train_score" % s][0] + for s in range(search.n_splits_) + ] + ) + train_mean = search.cv_results_["mean_train_score"][0] + train_std = search.cv_results_["std_train_score"][0] + + assert search.cv_results_["param_C"][0] == 1 + # scores are the same as above + assert_allclose(test_cv_scores, [1, 1.0 / 3.0]) + assert_allclose(train_cv_scores, [1, 1]) + # Unweighted mean/std is used + assert test_mean == pytest.approx(np.mean(test_cv_scores)) + assert test_std == pytest.approx(np.std(test_cv_scores)) + + # For the train scores, we do not take a weighted mean irrespective of + # i.i.d. or not + assert train_mean == pytest.approx(1) + assert train_std == pytest.approx(0) + + +def test_grid_search_cv_results_multimetric(): + X, y = make_classification(n_samples=50, n_features=4, random_state=42) + + n_splits = 3 + params = [ + dict( + kernel=[ + "rbf", + ], + C=[1, 10], + gamma=[0.1, 1], + ), + dict( + kernel=[ + "poly", + ], + degree=[1, 2], + ), + ] + + grid_searches = [] + for scoring in ( + {"accuracy": make_scorer(accuracy_score), "recall": make_scorer(recall_score)}, + "accuracy", + "recall", + ): + grid_search = GridSearchCV( + SVC(), cv=n_splits, param_grid=params, scoring=scoring, refit=False + ) + grid_search.fit(X, y) + grid_searches.append(grid_search) + + compare_cv_results_multimetric_with_single(*grid_searches) + + +def test_random_search_cv_results_multimetric(): + X, y = make_classification(n_samples=50, n_features=4, random_state=42) + + n_splits = 3 + n_search_iter = 30 + + # Scipy 0.12's stats dists do not accept seed, hence we use param grid + params = dict(C=np.logspace(-4, 1, 3), gamma=np.logspace(-5, 0, 3, base=0.1)) + for refit in (True, False): + random_searches = [] + for scoring in (("accuracy", "recall"), "accuracy", "recall"): + # If True, for multi-metric pass refit='accuracy' + if refit: + probability = True + refit = "accuracy" if isinstance(scoring, tuple) else refit + else: + probability = False + clf = SVC(probability=probability, random_state=42) + random_search = RandomizedSearchCV( + clf, + n_iter=n_search_iter, + cv=n_splits, + param_distributions=params, + scoring=scoring, + refit=refit, + random_state=0, + ) + random_search.fit(X, y) + random_searches.append(random_search) + + compare_cv_results_multimetric_with_single(*random_searches) + compare_refit_methods_when_refit_with_acc( + random_searches[0], random_searches[1], refit + ) + + +def compare_cv_results_multimetric_with_single(search_multi, search_acc, search_rec): + """Compare multi-metric cv_results with the ensemble of multiple + single metric cv_results from single metric grid/random search""" + + assert search_multi.multimetric_ + assert_array_equal(sorted(search_multi.scorer_), ("accuracy", "recall")) + + cv_results_multi = search_multi.cv_results_ + cv_results_acc_rec = { + 
re.sub("_score$", "_accuracy", k): v for k, v in search_acc.cv_results_.items() + } + cv_results_acc_rec.update( + {re.sub("_score$", "_recall", k): v for k, v in search_rec.cv_results_.items()} + ) + + # Check if score and timing are reasonable, also checks if the keys + # are present + assert all( + ( + np.all(cv_results_multi[k] <= 1) + for k in ( + "mean_score_time", + "std_score_time", + "mean_fit_time", + "std_fit_time", + ) + ) + ) + + # Compare the keys, other than time keys, among multi-metric and + # single metric grid search results. np.testing.assert_equal performs a + # deep nested comparison of the two cv_results dicts + np.testing.assert_equal( + {k: v for k, v in cv_results_multi.items() if not k.endswith("_time")}, + {k: v for k, v in cv_results_acc_rec.items() if not k.endswith("_time")}, + ) + + +def compare_refit_methods_when_refit_with_acc(search_multi, search_acc, refit): + """Compare refit multi-metric search methods with single metric methods""" + assert search_acc.refit == refit + if refit: + assert search_multi.refit == "accuracy" + else: + assert not search_multi.refit + return # search cannot predict/score without refit + + X, y = make_blobs(n_samples=100, n_features=4, random_state=42) + for method in ("predict", "predict_proba", "predict_log_proba"): + assert_almost_equal( + getattr(search_multi, method)(X), getattr(search_acc, method)(X) + ) + assert_almost_equal(search_multi.score(X, y), search_acc.score(X, y)) + for key in ("best_index_", "best_score_", "best_params_"): + assert getattr(search_multi, key) == getattr(search_acc, key) + + +@pytest.mark.parametrize( + "search_cv", + [ + RandomizedSearchCV( + estimator=DecisionTreeClassifier(), + param_distributions={"max_depth": [5, 10]}, + ), + GridSearchCV( + estimator=DecisionTreeClassifier(), param_grid={"max_depth": [5, 10]} + ), + ], +) +def test_search_cv_score_samples_error(search_cv): + X, y = make_blobs(n_samples=100, n_features=4, random_state=42) + search_cv.fit(X, y) + + # Make sure to error out when underlying estimator does not implement + # the method `score_samples` + outer_msg = f"'{search_cv.__class__.__name__}' has no attribute 'score_samples'" + inner_msg = "'DecisionTreeClassifier' object has no attribute 'score_samples'" + + with pytest.raises(AttributeError, match=outer_msg) as exec_info: + search_cv.score_samples(X) + assert isinstance(exec_info.value.__cause__, AttributeError) + assert inner_msg == str(exec_info.value.__cause__) + + +@pytest.mark.parametrize( + "search_cv", + [ + RandomizedSearchCV( + estimator=LocalOutlierFactor(novelty=True), + param_distributions={"n_neighbors": [5, 10]}, + scoring="precision", + ), + GridSearchCV( + estimator=LocalOutlierFactor(novelty=True), + param_grid={"n_neighbors": [5, 10]}, + scoring="precision", + ), + ], +) +def test_search_cv_score_samples_method(search_cv): + # Set parameters + rng = np.random.RandomState(42) + n_samples = 300 + outliers_fraction = 0.15 + n_outliers = int(outliers_fraction * n_samples) + n_inliers = n_samples - n_outliers + + # Create dataset + X = make_blobs( + n_samples=n_inliers, + n_features=2, + centers=[[0, 0], [0, 0]], + cluster_std=0.5, + random_state=0, + )[0] + # Add some noisy points + X = np.concatenate([X, rng.uniform(low=-6, high=6, size=(n_outliers, 2))], axis=0) + + # Define labels to be able to score the estimator with `search_cv` + y_true = np.array([1] * n_samples) + y_true[-n_outliers:] = -1 + + # Fit on data + search_cv.fit(X, y_true) + + # Verify that the stand alone estimator yields the same 
results + # as the ones obtained with *SearchCV + assert_allclose( + search_cv.score_samples(X), search_cv.best_estimator_.score_samples(X) + ) + + +def test_search_cv_results_rank_tie_breaking(): + X, y = make_blobs(n_samples=50, random_state=42) + + # The two C values are close enough to give similar models + # which would result in a tie of their mean cv-scores + param_grid = {"C": [1, 1.001, 0.001]} + + grid_search = GridSearchCV(SVC(), param_grid=param_grid, return_train_score=True) + random_search = RandomizedSearchCV( + SVC(), n_iter=3, param_distributions=param_grid, return_train_score=True + ) + + for search in (grid_search, random_search): + search.fit(X, y) + cv_results = search.cv_results_ + # Check tie breaking strategy - + # Check that there is a tie in the mean scores between + # candidates 1 and 2 alone + assert_almost_equal( + cv_results["mean_test_score"][0], cv_results["mean_test_score"][1] + ) + assert_almost_equal( + cv_results["mean_train_score"][0], cv_results["mean_train_score"][1] + ) + assert not np.allclose( + cv_results["mean_test_score"][1], cv_results["mean_test_score"][2] + ) + assert not np.allclose( + cv_results["mean_train_score"][1], cv_results["mean_train_score"][2] + ) + # 'min' rank should be assigned to the tied candidates + assert_almost_equal(search.cv_results_["rank_test_score"], [1, 1, 3]) + + +def test_search_cv_results_none_param(): + X, y = [[1], [2], [3], [4], [5]], [0, 0, 0, 0, 1] + estimators = (DecisionTreeRegressor(), DecisionTreeClassifier()) + est_parameters = {"random_state": [0, None]} + cv = KFold() + + for est in estimators: + grid_search = GridSearchCV( + est, + est_parameters, + cv=cv, + ).fit(X, y) + assert_array_equal(grid_search.cv_results_["param_random_state"], [0, None]) + + +@ignore_warnings() +def test_search_cv_timing(): + svc = LinearSVC(dual="auto", random_state=0) + + X = [ + [ + 1, + ], + [ + 2, + ], + [ + 3, + ], + [ + 4, + ], + ] + y = [0, 1, 1, 0] + + gs = GridSearchCV(svc, {"C": [0, 1]}, cv=2, error_score=0) + rs = RandomizedSearchCV(svc, {"C": [0, 1]}, cv=2, error_score=0, n_iter=2) + + for search in (gs, rs): + search.fit(X, y) + for key in ["mean_fit_time", "std_fit_time"]: + # NOTE The precision of time.time in windows is not high + # enough for the fit/score times to be non-zero for trivial X and y + assert np.all(search.cv_results_[key] >= 0) + assert np.all(search.cv_results_[key] < 1) + + for key in ["mean_score_time", "std_score_time"]: + assert search.cv_results_[key][1] >= 0 + assert search.cv_results_[key][0] == 0.0 + assert np.all(search.cv_results_[key] < 1) + + assert hasattr(search, "refit_time_") + assert isinstance(search.refit_time_, float) + assert search.refit_time_ >= 0 + + +def test_grid_search_correct_score_results(): + # test that correct scores are used + n_splits = 3 + clf = LinearSVC(dual="auto", random_state=0) + X, y = make_blobs(random_state=0, centers=2) + Cs = [0.1, 1, 10] + for score in ["f1", "roc_auc"]: + grid_search = GridSearchCV(clf, {"C": Cs}, scoring=score, cv=n_splits) + cv_results = grid_search.fit(X, y).cv_results_ + + # Test scorer names + result_keys = list(cv_results.keys()) + expected_keys = ("mean_test_score", "rank_test_score") + tuple( + "split%d_test_score" % cv_i for cv_i in range(n_splits) + ) + assert all(np.isin(expected_keys, result_keys)) + + cv = StratifiedKFold(n_splits=n_splits) + n_splits = grid_search.n_splits_ + for candidate_i, C in enumerate(Cs): + clf.set_params(C=C) + cv_scores = np.array( + [ + grid_search.cv_results_["split%d_test_score" % 
s][candidate_i] + for s in range(n_splits) + ] + ) + for i, (train, test) in enumerate(cv.split(X, y)): + clf.fit(X[train], y[train]) + if score == "f1": + correct_score = f1_score(y[test], clf.predict(X[test])) + elif score == "roc_auc": + dec = clf.decision_function(X[test]) + correct_score = roc_auc_score(y[test], dec) + assert_almost_equal(correct_score, cv_scores[i]) + + +def test_pickle(): + # Test that a fit search can be pickled + clf = MockClassifier() + grid_search = GridSearchCV(clf, {"foo_param": [1, 2, 3]}, refit=True, cv=3) + grid_search.fit(X, y) + grid_search_pickled = pickle.loads(pickle.dumps(grid_search)) + assert_array_almost_equal(grid_search.predict(X), grid_search_pickled.predict(X)) + + random_search = RandomizedSearchCV( + clf, {"foo_param": [1, 2, 3]}, refit=True, n_iter=3, cv=3 + ) + random_search.fit(X, y) + random_search_pickled = pickle.loads(pickle.dumps(random_search)) + assert_array_almost_equal( + random_search.predict(X), random_search_pickled.predict(X) + ) + + +def test_grid_search_with_multioutput_data(): + # Test search with multi-output estimator + + X, y = make_multilabel_classification(return_indicator=True, random_state=0) + + est_parameters = {"max_depth": [1, 2, 3, 4]} + cv = KFold() + + estimators = [ + DecisionTreeRegressor(random_state=0), + DecisionTreeClassifier(random_state=0), + ] + + # Test with grid search cv + for est in estimators: + grid_search = GridSearchCV(est, est_parameters, cv=cv) + grid_search.fit(X, y) + res_params = grid_search.cv_results_["params"] + for cand_i in range(len(res_params)): + est.set_params(**res_params[cand_i]) + + for i, (train, test) in enumerate(cv.split(X, y)): + est.fit(X[train], y[train]) + correct_score = est.score(X[test], y[test]) + assert_almost_equal( + correct_score, + grid_search.cv_results_["split%d_test_score" % i][cand_i], + ) + + # Test with a randomized search + for est in estimators: + random_search = RandomizedSearchCV(est, est_parameters, cv=cv, n_iter=3) + random_search.fit(X, y) + res_params = random_search.cv_results_["params"] + for cand_i in range(len(res_params)): + est.set_params(**res_params[cand_i]) + + for i, (train, test) in enumerate(cv.split(X, y)): + est.fit(X[train], y[train]) + correct_score = est.score(X[test], y[test]) + assert_almost_equal( + correct_score, + random_search.cv_results_["split%d_test_score" % i][cand_i], + ) + + +def test_predict_proba_disabled(): + # Test predict_proba when disabled on estimator. 
+ X = np.arange(20).reshape(5, -1) + y = [0, 0, 1, 1, 1] + clf = SVC(probability=False) + gs = GridSearchCV(clf, {}, cv=2).fit(X, y) + assert not hasattr(gs, "predict_proba") + + +def test_grid_search_allows_nans(): + # Test GridSearchCV with SimpleImputer + X = np.arange(20, dtype=np.float64).reshape(5, -1) + X[2, :] = np.nan + y = [0, 0, 1, 1, 1] + p = Pipeline( + [ + ("imputer", SimpleImputer(strategy="mean", missing_values=np.nan)), + ("classifier", MockClassifier()), + ] + ) + GridSearchCV(p, {"classifier__foo_param": [1, 2, 3]}, cv=2).fit(X, y) + + +class FailingClassifier(BaseEstimator): + """Classifier that raises a ValueError on fit()""" + + FAILING_PARAMETER = 2 + + def __init__(self, parameter=None): + self.parameter = parameter + + def fit(self, X, y=None): + if self.parameter == FailingClassifier.FAILING_PARAMETER: + raise ValueError("Failing classifier failed as required") + + def predict(self, X): + return np.zeros(X.shape[0]) + + def score(self, X=None, Y=None): + return 0.0 + + +def test_grid_search_failing_classifier(): + # GridSearchCV with on_error != 'raise' + # Ensures that a warning is raised and score reset where appropriate. + + X, y = make_classification(n_samples=20, n_features=10, random_state=0) + + clf = FailingClassifier() + + # refit=False because we only want to check that errors caused by fits + # to individual folds will be caught and warnings raised instead. If + # refit was done, then an exception would be raised on refit and not + # caught by grid_search (expected behavior), and this would cause an + # error in this test. + gs = GridSearchCV( + clf, + [{"parameter": [0, 1, 2]}], + scoring="accuracy", + refit=False, + error_score=0.0, + ) + + warning_message = re.compile( + "5 fits failed.+total of 15.+The score on these" + r" train-test partitions for these parameters will be set to 0\.0.+" + "5 fits failed with the following error.+ValueError.+Failing classifier failed" + " as required", + flags=re.DOTALL, + ) + with pytest.warns(FitFailedWarning, match=warning_message): + gs.fit(X, y) + n_candidates = len(gs.cv_results_["params"]) + + # Ensure that grid scores were set to zero as required for those fits + # that are expected to fail. 
+ def get_cand_scores(i): + return np.array( + [gs.cv_results_["split%d_test_score" % s][i] for s in range(gs.n_splits_)] + ) + + assert all( + ( + np.all(get_cand_scores(cand_i) == 0.0) + for cand_i in range(n_candidates) + if gs.cv_results_["param_parameter"][cand_i] + == FailingClassifier.FAILING_PARAMETER + ) + ) + + gs = GridSearchCV( + clf, + [{"parameter": [0, 1, 2]}], + scoring="accuracy", + refit=False, + error_score=float("nan"), + ) + warning_message = re.compile( + "5 fits failed.+total of 15.+The score on these" + r" train-test partitions for these parameters will be set to nan.+" + "5 fits failed with the following error.+ValueError.+Failing classifier failed" + " as required", + flags=re.DOTALL, + ) + with pytest.warns(FitFailedWarning, match=warning_message): + gs.fit(X, y) + n_candidates = len(gs.cv_results_["params"]) + assert all( + np.all(np.isnan(get_cand_scores(cand_i))) + for cand_i in range(n_candidates) + if gs.cv_results_["param_parameter"][cand_i] + == FailingClassifier.FAILING_PARAMETER + ) + + ranks = gs.cv_results_["rank_test_score"] + + # Check that succeeded estimators have lower ranks + assert ranks[0] <= 2 and ranks[1] <= 2 + # Check that failed estimator has the highest rank + assert ranks[clf.FAILING_PARAMETER] == 3 + assert gs.best_index_ != clf.FAILING_PARAMETER + + +def test_grid_search_classifier_all_fits_fail(): + X, y = make_classification(n_samples=20, n_features=10, random_state=0) + + clf = FailingClassifier() + + gs = GridSearchCV( + clf, + [{"parameter": [FailingClassifier.FAILING_PARAMETER] * 3}], + error_score=0.0, + ) + + warning_message = re.compile( + ( + "All the 15 fits failed.+15 fits failed with the following" + " error.+ValueError.+Failing classifier failed as required" + ), + flags=re.DOTALL, + ) + with pytest.raises(ValueError, match=warning_message): + gs.fit(X, y) + + +def test_grid_search_failing_classifier_raise(): + # GridSearchCV with on_error == 'raise' raises the error + + X, y = make_classification(n_samples=20, n_features=10, random_state=0) + + clf = FailingClassifier() + + # refit=False because we want to test the behaviour of the grid search part + gs = GridSearchCV( + clf, + [{"parameter": [0, 1, 2]}], + scoring="accuracy", + refit=False, + error_score="raise", + ) + + # FailingClassifier issues a ValueError so this is what we look for. + with pytest.raises(ValueError): + gs.fit(X, y) + + +def test_parameters_sampler_replacement(): + # raise warning if n_iter is bigger than total parameter space + params = [ + {"first": [0, 1], "second": ["a", "b", "c"]}, + {"third": ["two", "values"]}, + ] + sampler = ParameterSampler(params, n_iter=9) + n_iter = 9 + grid_size = 8 + expected_warning = ( + "The total space of parameters %d is smaller " + "than n_iter=%d. Running %d iterations. For " + "exhaustive searches, use GridSearchCV." 
% (grid_size, n_iter, grid_size) + ) + with pytest.warns(UserWarning, match=expected_warning): + list(sampler) + + # degenerates to GridSearchCV if n_iter the same as grid_size + sampler = ParameterSampler(params, n_iter=8) + samples = list(sampler) + assert len(samples) == 8 + for values in ParameterGrid(params): + assert values in samples + assert len(ParameterSampler(params, n_iter=1000)) == 8 + + # test sampling without replacement in a large grid + params = {"a": range(10), "b": range(10), "c": range(10)} + sampler = ParameterSampler(params, n_iter=99, random_state=42) + samples = list(sampler) + assert len(samples) == 99 + hashable_samples = ["a%db%dc%d" % (p["a"], p["b"], p["c"]) for p in samples] + assert len(set(hashable_samples)) == 99 + + # doesn't go into infinite loops + params_distribution = {"first": bernoulli(0.5), "second": ["a", "b", "c"]} + sampler = ParameterSampler(params_distribution, n_iter=7) + samples = list(sampler) + assert len(samples) == 7 + + +def test_stochastic_gradient_loss_param(): + # Make sure the predict_proba works when loss is specified + # as one of the parameters in the param_grid. + param_grid = { + "loss": ["log_loss"], + } + X = np.arange(24).reshape(6, -1) + y = [0, 0, 0, 1, 1, 1] + clf = GridSearchCV( + estimator=SGDClassifier(loss="hinge"), param_grid=param_grid, cv=3 + ) + + # When the estimator is not fitted, `predict_proba` is not available as the + # loss is 'hinge'. + assert not hasattr(clf, "predict_proba") + clf.fit(X, y) + clf.predict_proba(X) + clf.predict_log_proba(X) + + # Make sure `predict_proba` is not available when setting loss=['hinge'] + # in param_grid + param_grid = { + "loss": ["hinge"], + } + clf = GridSearchCV( + estimator=SGDClassifier(loss="hinge"), param_grid=param_grid, cv=3 + ) + assert not hasattr(clf, "predict_proba") + clf.fit(X, y) + assert not hasattr(clf, "predict_proba") + + +def test_search_train_scores_set_to_false(): + X = np.arange(6).reshape(6, -1) + y = [0, 0, 0, 1, 1, 1] + clf = LinearSVC(dual="auto", random_state=0) + + gs = GridSearchCV(clf, param_grid={"C": [0.1, 0.2]}, cv=3) + gs.fit(X, y) + + +def test_grid_search_cv_splits_consistency(): + # Check if a one time iterable is accepted as a cv parameter. 
+ n_samples = 100 + n_splits = 5 + X, y = make_classification(n_samples=n_samples, random_state=0) + + gs = GridSearchCV( + LinearSVC(dual="auto", random_state=0), + param_grid={"C": [0.1, 0.2, 0.3]}, + cv=OneTimeSplitter(n_splits=n_splits, n_samples=n_samples), + return_train_score=True, + ) + gs.fit(X, y) + + gs2 = GridSearchCV( + LinearSVC(dual="auto", random_state=0), + param_grid={"C": [0.1, 0.2, 0.3]}, + cv=KFold(n_splits=n_splits), + return_train_score=True, + ) + gs2.fit(X, y) + + # Give generator as a cv parameter + assert isinstance( + KFold(n_splits=n_splits, shuffle=True, random_state=0).split(X, y), + GeneratorType, + ) + gs3 = GridSearchCV( + LinearSVC(dual="auto", random_state=0), + param_grid={"C": [0.1, 0.2, 0.3]}, + cv=KFold(n_splits=n_splits, shuffle=True, random_state=0).split(X, y), + return_train_score=True, + ) + gs3.fit(X, y) + + gs4 = GridSearchCV( + LinearSVC(dual="auto", random_state=0), + param_grid={"C": [0.1, 0.2, 0.3]}, + cv=KFold(n_splits=n_splits, shuffle=True, random_state=0), + return_train_score=True, + ) + gs4.fit(X, y) + + def _pop_time_keys(cv_results): + for key in ( + "mean_fit_time", + "std_fit_time", + "mean_score_time", + "std_score_time", + ): + cv_results.pop(key) + return cv_results + + # Check if generators are supported as cv and + # that the splits are consistent + np.testing.assert_equal( + _pop_time_keys(gs3.cv_results_), _pop_time_keys(gs4.cv_results_) + ) + + # OneTimeSplitter is a non-re-entrant cv where split can be called only + # once if ``cv.split`` is called once per param setting in GridSearchCV.fit + # the 2nd and 3rd parameter will not be evaluated as no train/test indices + # will be generated for the 2nd and subsequent cv.split calls. + # This is a check to make sure cv.split is not called once per param + # setting. 
+ np.testing.assert_equal( + {k: v for k, v in gs.cv_results_.items() if not k.endswith("_time")}, + {k: v for k, v in gs2.cv_results_.items() if not k.endswith("_time")}, + ) + + # Check consistency of folds across the parameters + gs = GridSearchCV( + LinearSVC(dual="auto", random_state=0), + param_grid={"C": [0.1, 0.1, 0.2, 0.2]}, + cv=KFold(n_splits=n_splits, shuffle=True), + return_train_score=True, + ) + gs.fit(X, y) + + # As the first two param settings (C=0.1) and the next two param + # settings (C=0.2) are same, the test and train scores must also be + # same as long as the same train/test indices are generated for all + # the cv splits, for both param setting + for score_type in ("train", "test"): + per_param_scores = {} + for param_i in range(4): + per_param_scores[param_i] = [ + gs.cv_results_["split%d_%s_score" % (s, score_type)][param_i] + for s in range(5) + ] + + assert_array_almost_equal(per_param_scores[0], per_param_scores[1]) + assert_array_almost_equal(per_param_scores[2], per_param_scores[3]) + + +def test_transform_inverse_transform_round_trip(): + clf = MockClassifier() + grid_search = GridSearchCV(clf, {"foo_param": [1, 2, 3]}, cv=3, verbose=3) + + grid_search.fit(X, y) + X_round_trip = grid_search.inverse_transform(grid_search.transform(X)) + assert_array_equal(X, X_round_trip) + + +def test_custom_run_search(): + def check_results(results, gscv): + exp_results = gscv.cv_results_ + assert sorted(results.keys()) == sorted(exp_results) + for k in results: + if not k.endswith("_time"): + # XXX: results['params'] is a list :| + results[k] = np.asanyarray(results[k]) + if results[k].dtype.kind == "O": + assert_array_equal( + exp_results[k], results[k], err_msg="Checking " + k + ) + else: + assert_allclose(exp_results[k], results[k], err_msg="Checking " + k) + + def fit_grid(param_grid): + return GridSearchCV(clf, param_grid, return_train_score=True).fit(X, y) + + class CustomSearchCV(BaseSearchCV): + def __init__(self, estimator, **kwargs): + super().__init__(estimator, **kwargs) + + def _run_search(self, evaluate): + results = evaluate([{"max_depth": 1}, {"max_depth": 2}]) + check_results(results, fit_grid({"max_depth": [1, 2]})) + results = evaluate([{"min_samples_split": 5}, {"min_samples_split": 10}]) + check_results( + results, + fit_grid([{"max_depth": [1, 2]}, {"min_samples_split": [5, 10]}]), + ) + + # Using regressor to make sure each score differs + clf = DecisionTreeRegressor(random_state=0) + X, y = make_classification(n_samples=100, n_informative=4, random_state=0) + mycv = CustomSearchCV(clf, return_train_score=True).fit(X, y) + gscv = fit_grid([{"max_depth": [1, 2]}, {"min_samples_split": [5, 10]}]) + + results = mycv.cv_results_ + check_results(results, gscv) + for attr in dir(gscv): + if ( + attr[0].islower() + and attr[-1:] == "_" + and attr + not in { + "cv_results_", + "best_estimator_", + "refit_time_", + "classes_", + "scorer_", + } + ): + assert getattr(gscv, attr) == getattr(mycv, attr), ( + "Attribute %s not equal" % attr + ) + + +def test__custom_fit_no_run_search(): + class NoRunSearchSearchCV(BaseSearchCV): + def __init__(self, estimator, **kwargs): + super().__init__(estimator, **kwargs) + + def fit(self, X, y=None, groups=None, **fit_params): + return self + + # this should not raise any exceptions + NoRunSearchSearchCV(SVC()).fit(X, y) + + class BadSearchCV(BaseSearchCV): + def __init__(self, estimator, **kwargs): + super().__init__(estimator, **kwargs) + + with pytest.raises(NotImplementedError, match="_run_search not implemented."): + # 
this should raise a NotImplementedError + BadSearchCV(SVC()).fit(X, y) + + +def test_empty_cv_iterator_error(): + # Use global X, y + + # create cv + cv = KFold(n_splits=3).split(X) + + # pop all of it, this should cause the expected ValueError + [u for u in cv] + # cv is empty now + + train_size = 100 + ridge = RandomizedSearchCV(Ridge(), {"alpha": [1e-3, 1e-2, 1e-1]}, cv=cv, n_jobs=4) + + # assert that this raises an error + with pytest.raises( + ValueError, + match=( + "No fits were performed. " + "Was the CV iterator empty\\? " + "Were there no candidates\\?" + ), + ): + ridge.fit(X[:train_size], y[:train_size]) + + +def test_random_search_bad_cv(): + # Use global X, y + + class BrokenKFold(KFold): + def get_n_splits(self, *args, **kw): + return 1 + + # create bad cv + cv = BrokenKFold(n_splits=3) + + train_size = 100 + ridge = RandomizedSearchCV(Ridge(), {"alpha": [1e-3, 1e-2, 1e-1]}, cv=cv, n_jobs=4) + + # assert that this raises an error + with pytest.raises( + ValueError, + match=( + "cv.split and cv.get_n_splits returned " + "inconsistent results. Expected \\d+ " + "splits, got \\d+" + ), + ): + ridge.fit(X[:train_size], y[:train_size]) + + +@pytest.mark.parametrize("return_train_score", [False, True]) +@pytest.mark.parametrize( + "SearchCV, specialized_params", + [ + (GridSearchCV, {"param_grid": {"max_depth": [2, 3, 5, 8]}}), + ( + RandomizedSearchCV, + {"param_distributions": {"max_depth": [2, 3, 5, 8]}, "n_iter": 4}, + ), + ], +) +def test_searchcv_raise_warning_with_non_finite_score( + SearchCV, specialized_params, return_train_score +): + # Non-regression test for: + # https://github.com/scikit-learn/scikit-learn/issues/10529 + # Check that we raise a UserWarning when a non-finite score is + # computed in the SearchCV + X, y = make_classification(n_classes=2, random_state=0) + + class FailingScorer: + """Scorer that will fail for some split but not all.""" + + def __init__(self): + self.n_counts = 0 + + def __call__(self, estimator, X, y): + self.n_counts += 1 + if self.n_counts % 5 == 0: + return np.nan + return 1 + + grid = SearchCV( + DecisionTreeClassifier(), + scoring=FailingScorer(), + cv=3, + return_train_score=return_train_score, + **specialized_params, + ) + + with pytest.warns(UserWarning) as warn_msg: + grid.fit(X, y) + + set_with_warning = ["test", "train"] if return_train_score else ["test"] + assert len(warn_msg) == len(set_with_warning) + for msg, dataset in zip(warn_msg, set_with_warning): + assert f"One or more of the {dataset} scores are non-finite" in str(msg.message) + + # all non-finite scores should be equally ranked last + last_rank = grid.cv_results_["rank_test_score"].max() + non_finite_mask = np.isnan(grid.cv_results_["mean_test_score"]) + assert_array_equal(grid.cv_results_["rank_test_score"][non_finite_mask], last_rank) + # all finite scores should be better ranked than the non-finite scores + assert np.all(grid.cv_results_["rank_test_score"][~non_finite_mask] < last_rank) + + +def test_callable_multimetric_confusion_matrix(): + # Test callable with many metrics inserts the correct names and metrics + # into the search cv object + def custom_scorer(clf, X, y): + y_pred = clf.predict(X) + cm = confusion_matrix(y, y_pred) + return {"tn": cm[0, 0], "fp": cm[0, 1], "fn": cm[1, 0], "tp": cm[1, 1]} + + X, y = make_classification(n_samples=40, n_features=4, random_state=42) + est = LinearSVC(dual="auto", random_state=42) + search = GridSearchCV(est, {"C": [0.1, 1]}, scoring=custom_scorer, refit="fp") + + search.fit(X, y) + + score_names = ["tn", "fp", 
"fn", "tp"] + for name in score_names: + assert "mean_test_{}".format(name) in search.cv_results_ + + y_pred = search.predict(X) + cm = confusion_matrix(y, y_pred) + assert search.score(X, y) == pytest.approx(cm[0, 1]) + + +def test_callable_multimetric_same_as_list_of_strings(): + # Test callable multimetric is the same as a list of strings + def custom_scorer(est, X, y): + y_pred = est.predict(X) + return { + "recall": recall_score(y, y_pred), + "accuracy": accuracy_score(y, y_pred), + } + + X, y = make_classification(n_samples=40, n_features=4, random_state=42) + est = LinearSVC(dual="auto", random_state=42) + search_callable = GridSearchCV( + est, {"C": [0.1, 1]}, scoring=custom_scorer, refit="recall" + ) + search_str = GridSearchCV( + est, {"C": [0.1, 1]}, scoring=["recall", "accuracy"], refit="recall" + ) + + search_callable.fit(X, y) + search_str.fit(X, y) + + assert search_callable.best_score_ == pytest.approx(search_str.best_score_) + assert search_callable.best_index_ == search_str.best_index_ + assert search_callable.score(X, y) == pytest.approx(search_str.score(X, y)) + + +def test_callable_single_metric_same_as_single_string(): + # Tests callable scorer is the same as scoring with a single string + def custom_scorer(est, X, y): + y_pred = est.predict(X) + return recall_score(y, y_pred) + + X, y = make_classification(n_samples=40, n_features=4, random_state=42) + est = LinearSVC(dual="auto", random_state=42) + search_callable = GridSearchCV( + est, {"C": [0.1, 1]}, scoring=custom_scorer, refit=True + ) + search_str = GridSearchCV(est, {"C": [0.1, 1]}, scoring="recall", refit="recall") + search_list_str = GridSearchCV( + est, {"C": [0.1, 1]}, scoring=["recall"], refit="recall" + ) + search_callable.fit(X, y) + search_str.fit(X, y) + search_list_str.fit(X, y) + + assert search_callable.best_score_ == pytest.approx(search_str.best_score_) + assert search_callable.best_index_ == search_str.best_index_ + assert search_callable.score(X, y) == pytest.approx(search_str.score(X, y)) + + assert search_list_str.best_score_ == pytest.approx(search_str.best_score_) + assert search_list_str.best_index_ == search_str.best_index_ + assert search_list_str.score(X, y) == pytest.approx(search_str.score(X, y)) + + +def test_callable_multimetric_error_on_invalid_key(): + # Raises when the callable scorer does not return a dict with `refit` key. 
+ def bad_scorer(est, X, y): + return {"bad_name": 1} + + X, y = make_classification(n_samples=40, n_features=4, random_state=42) + clf = GridSearchCV( + LinearSVC(dual="auto", random_state=42), + {"C": [0.1, 1]}, + scoring=bad_scorer, + refit="good_name", + ) + + msg = ( + "For multi-metric scoring, the parameter refit must be set to a " + "scorer key or a callable to refit" + ) + with pytest.raises(ValueError, match=msg): + clf.fit(X, y) + + +def test_callable_multimetric_error_failing_clf(): + # Warns when there is an estimator the fails to fit with a float + # error_score + def custom_scorer(est, X, y): + return {"acc": 1} + + X, y = make_classification(n_samples=20, n_features=10, random_state=0) + + clf = FailingClassifier() + gs = GridSearchCV( + clf, + [{"parameter": [0, 1, 2]}], + scoring=custom_scorer, + refit=False, + error_score=0.1, + ) + + warning_message = re.compile( + "5 fits failed.+total of 15.+The score on these" + r" train-test partitions for these parameters will be set to 0\.1", + flags=re.DOTALL, + ) + with pytest.warns(FitFailedWarning, match=warning_message): + gs.fit(X, y) + + assert_allclose(gs.cv_results_["mean_test_acc"], [1, 1, 0.1]) + + +def test_callable_multimetric_clf_all_fits_fail(): + # Warns and raises when all estimator fails to fit. + def custom_scorer(est, X, y): + return {"acc": 1} + + X, y = make_classification(n_samples=20, n_features=10, random_state=0) + + clf = FailingClassifier() + + gs = GridSearchCV( + clf, + [{"parameter": [FailingClassifier.FAILING_PARAMETER] * 3}], + scoring=custom_scorer, + refit=False, + error_score=0.1, + ) + + individual_fit_error_message = "ValueError: Failing classifier failed as required" + error_message = re.compile( + ( + "All the 15 fits failed.+your model is misconfigured.+" + f"{individual_fit_error_message}" + ), + flags=re.DOTALL, + ) + + with pytest.raises(ValueError, match=error_message): + gs.fit(X, y) + + +def test_n_features_in(): + # make sure grid search and random search delegate n_features_in to the + # best estimator + n_features = 4 + X, y = make_classification(n_features=n_features) + gbdt = HistGradientBoostingClassifier() + param_grid = {"max_iter": [3, 4]} + gs = GridSearchCV(gbdt, param_grid) + rs = RandomizedSearchCV(gbdt, param_grid, n_iter=1) + assert not hasattr(gs, "n_features_in_") + assert not hasattr(rs, "n_features_in_") + gs.fit(X, y) + rs.fit(X, y) + assert gs.n_features_in_ == n_features + assert rs.n_features_in_ == n_features + + +@pytest.mark.parametrize("pairwise", [True, False]) +def test_search_cv_pairwise_property_delegated_to_base_estimator(pairwise): + """ + Test implementation of BaseSearchCV has the pairwise tag + which matches the pairwise tag of its estimator. + This test make sure pairwise tag is delegated to the base estimator. + + Non-regression test for issue #13920. + """ + + class TestEstimator(BaseEstimator): + def _more_tags(self): + return {"pairwise": pairwise} + + est = TestEstimator() + attr_message = "BaseSearchCV pairwise tag must match estimator" + cv = GridSearchCV(est, {"n_neighbors": [10]}) + assert pairwise == cv._get_tags()["pairwise"], attr_message + + +def test_search_cv__pairwise_property_delegated_to_base_estimator(): + """ + Test implementation of BaseSearchCV has the pairwise property + which matches the pairwise tag of its estimator. + This test make sure pairwise tag is delegated to the base estimator. + + Non-regression test for issue #13920. 
+ """ + + class EstimatorPairwise(BaseEstimator): + def __init__(self, pairwise=True): + self.pairwise = pairwise + + def _more_tags(self): + return {"pairwise": self.pairwise} + + est = EstimatorPairwise() + attr_message = "BaseSearchCV _pairwise property must match estimator" + + for _pairwise_setting in [True, False]: + est.set_params(pairwise=_pairwise_setting) + cv = GridSearchCV(est, {"n_neighbors": [10]}) + assert _pairwise_setting == cv._get_tags()["pairwise"], attr_message + + +def test_search_cv_pairwise_property_equivalence_of_precomputed(): + """ + Test implementation of BaseSearchCV has the pairwise tag + which matches the pairwise tag of its estimator. + This test ensures the equivalence of 'precomputed'. + + Non-regression test for issue #13920. + """ + n_samples = 50 + n_splits = 2 + X, y = make_classification(n_samples=n_samples, random_state=0) + grid_params = {"n_neighbors": [10]} + + # defaults to euclidean metric (minkowski p = 2) + clf = KNeighborsClassifier() + cv = GridSearchCV(clf, grid_params, cv=n_splits) + cv.fit(X, y) + preds_original = cv.predict(X) + + # precompute euclidean metric to validate pairwise is working + X_precomputed = euclidean_distances(X) + clf = KNeighborsClassifier(metric="precomputed") + cv = GridSearchCV(clf, grid_params, cv=n_splits) + cv.fit(X_precomputed, y) + preds_precomputed = cv.predict(X_precomputed) + + attr_message = "GridSearchCV not identical with precomputed metric" + assert (preds_original == preds_precomputed).all(), attr_message + + +@pytest.mark.parametrize( + "SearchCV, param_search", + [(GridSearchCV, {"a": [0.1, 0.01]}), (RandomizedSearchCV, {"a": uniform(1, 3)})], +) +def test_scalar_fit_param(SearchCV, param_search): + # unofficially sanctioned tolerance for scalar values in fit_params + # non-regression test for: + # https://github.com/scikit-learn/scikit-learn/issues/15805 + class TestEstimator(ClassifierMixin, BaseEstimator): + def __init__(self, a=None): + self.a = a + + def fit(self, X, y, r=None): + self.r_ = r + + def predict(self, X): + return np.zeros(shape=(len(X))) + + model = SearchCV(TestEstimator(), param_search) + X, y = make_classification(random_state=42) + model.fit(X, y, r=42) + assert model.best_estimator_.r_ == 42 + + +@pytest.mark.parametrize( + "SearchCV, param_search", + [ + (GridSearchCV, {"alpha": [0.1, 0.01]}), + (RandomizedSearchCV, {"alpha": uniform(0.01, 0.1)}), + ], +) +def test_scalar_fit_param_compat(SearchCV, param_search): + # check support for scalar values in fit_params, for instance in LightGBM + # that do not exactly respect the scikit-learn API contract but that we do + # not want to break without an explicit deprecation cycle and API + # recommendations for implementing early stopping with a user provided + # validation set. non-regression test for: + # https://github.com/scikit-learn/scikit-learn/issues/15805 + X_train, X_valid, y_train, y_valid = train_test_split( + *make_classification(random_state=42), random_state=42 + ) + + class _FitParamClassifier(SGDClassifier): + def fit( + self, + X, + y, + sample_weight=None, + tuple_of_arrays=None, + scalar_param=None, + callable_param=None, + ): + super().fit(X, y, sample_weight=sample_weight) + assert scalar_param > 0 + assert callable(callable_param) + + # The tuple of arrays should be preserved as tuple. 
+ assert isinstance(tuple_of_arrays, tuple) + assert tuple_of_arrays[0].ndim == 2 + assert tuple_of_arrays[1].ndim == 1 + return self + + def _fit_param_callable(): + pass + + model = SearchCV(_FitParamClassifier(), param_search) + + # NOTE: `fit_params` should be data dependent (e.g. `sample_weight`) which + # is not the case for the following parameters. But this abuse is common in + # popular third-party libraries and we should tolerate this behavior for + # now and be careful not to break support for those without following + # proper deprecation cycle. + fit_params = { + "tuple_of_arrays": (X_valid, y_valid), + "callable_param": _fit_param_callable, + "scalar_param": 42, + } + model.fit(X_train, y_train, **fit_params) + + +# FIXME: Replace this test with a full `check_estimator` once we have API only +# checks. +@pytest.mark.filterwarnings("ignore:The total space of parameters 4 is") +@pytest.mark.parametrize("SearchCV", [GridSearchCV, RandomizedSearchCV]) +@pytest.mark.parametrize("Predictor", [MinimalRegressor, MinimalClassifier]) +def test_search_cv_using_minimal_compatible_estimator(SearchCV, Predictor): + # Check that third-party library can run tests without inheriting from + # BaseEstimator. + rng = np.random.RandomState(0) + X, y = rng.randn(25, 2), np.array([0] * 5 + [1] * 20) + + model = Pipeline( + [("transformer", MinimalTransformer()), ("predictor", Predictor())] + ) + + params = { + "transformer__param": [1, 10], + "predictor__parama": [1, 10], + } + search = SearchCV(model, params, error_score="raise") + search.fit(X, y) + + assert search.best_params_.keys() == params.keys() + + y_pred = search.predict(X) + if is_classifier(search): + assert_array_equal(y_pred, 1) + assert search.score(X, y) == pytest.approx(accuracy_score(y, y_pred)) + else: + assert_allclose(y_pred, y.mean()) + assert search.score(X, y) == pytest.approx(r2_score(y, y_pred)) + + +@pytest.mark.parametrize("return_train_score", [True, False]) +def test_search_cv_verbose_3(capsys, return_train_score): + """Check that search cv with verbose>2 shows the score for single + metrics. 
non-regression test for #19658.""" + X, y = make_classification(n_samples=100, n_classes=2, flip_y=0.2, random_state=0) + clf = LinearSVC(dual="auto", random_state=0) + grid = {"C": [0.1]} + + GridSearchCV( + clf, + grid, + scoring="accuracy", + verbose=3, + cv=3, + return_train_score=return_train_score, + ).fit(X, y) + captured = capsys.readouterr().out + if return_train_score: + match = re.findall(r"score=\(train=[\d\.]+, test=[\d.]+\)", captured) + else: + match = re.findall(r"score=[\d\.]+", captured) + assert len(match) == 3 + + +@pytest.mark.parametrize( + "SearchCV, param_search", + [ + (GridSearchCV, "param_grid"), + (RandomizedSearchCV, "param_distributions"), + (HalvingGridSearchCV, "param_grid"), + ], +) +def test_search_estimator_param(SearchCV, param_search): + # test that SearchCV object doesn't change the object given in the parameter grid + X, y = make_classification(random_state=42) + + params = {"clf": [LinearSVC(dual="auto")], "clf__C": [0.01]} + orig_C = params["clf"][0].C + + pipe = Pipeline([("trs", MinimalTransformer()), ("clf", None)]) + + param_grid_search = {param_search: params} + gs = SearchCV(pipe, refit=True, cv=2, scoring="accuracy", **param_grid_search).fit( + X, y + ) + + # testing that the original object in params is not changed + assert params["clf"][0].C == orig_C + # testing that the GS is setting the parameter of the step correctly + assert gs.best_estimator_.named_steps["clf"].C == 0.01 + + +# Metadata Routing Tests +# ====================== + + +@pytest.mark.usefixtures("enable_slep006") +@pytest.mark.parametrize( + "SearchCV, param_search", + [ + (GridSearchCV, "param_grid"), + (RandomizedSearchCV, "param_distributions"), + ], +) +def test_multi_metric_search_forwards_metadata(SearchCV, param_search): + """Test that *SearchCV forwards metadata correctly when passed multiple metrics.""" + X, y = make_classification(random_state=42) + n_samples = _num_samples(X) + rng = np.random.RandomState(0) + score_weights = rng.rand(n_samples) + score_metadata = rng.rand(n_samples) + + est = LinearSVC(dual="auto") + param_grid_search = {param_search: {"C": [1]}} + + scorer_registry = _Registry() + scorer = ConsumingScorer(registry=scorer_registry).set_score_request( + sample_weight="score_weights", metadata="score_metadata" + ) + scoring = dict(my_scorer=scorer, accuracy="accuracy") + SearchCV(est, refit="accuracy", cv=2, scoring=scoring, **param_grid_search).fit( + X, y, score_weights=score_weights, score_metadata=score_metadata + ) + assert len(scorer_registry) + for _scorer in scorer_registry: + check_recorded_metadata( + obj=_scorer, + method="score", + split_params=("sample_weight", "metadata"), + sample_weight=score_weights, + metadata=score_metadata, + ) + + +@pytest.mark.parametrize( + "SearchCV, param_search", + [ + (GridSearchCV, "param_grid"), + (RandomizedSearchCV, "param_distributions"), + (HalvingGridSearchCV, "param_grid"), + ], +) +def test_score_rejects_params_with_no_routing_enabled(SearchCV, param_search): + """*SearchCV should reject **params when metadata routing is not enabled + since this is added only when routing is enabled.""" + X, y = make_classification(random_state=42) + est = LinearSVC(dual="auto") + param_grid_search = {param_search: {"C": [1]}} + + gs = SearchCV(est, cv=2, **param_grid_search).fit(X, y) + + with pytest.raises(ValueError, match="is only supported if"): + gs.score(X, y, metadata=1) + + +# End of Metadata Routing Tests +# ============================= diff --git 
a/env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/tests/test_split.py b/env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/tests/test_split.py new file mode 100644 index 0000000000000000000000000000000000000000..57bc6b22351b9403c1939da22c31b6da3241886f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/tests/test_split.py @@ -0,0 +1,2025 @@ +"""Test the split module""" +import re +import warnings +from itertools import combinations, combinations_with_replacement, permutations + +import numpy as np +import pytest +from scipy import stats +from scipy.sparse import issparse +from scipy.special import comb + +from sklearn import config_context +from sklearn.datasets import load_digits, make_classification +from sklearn.dummy import DummyClassifier +from sklearn.model_selection import ( + GridSearchCV, + GroupKFold, + GroupShuffleSplit, + KFold, + LeaveOneGroupOut, + LeaveOneOut, + LeavePGroupsOut, + LeavePOut, + PredefinedSplit, + RepeatedKFold, + RepeatedStratifiedKFold, + ShuffleSplit, + StratifiedGroupKFold, + StratifiedKFold, + StratifiedShuffleSplit, + TimeSeriesSplit, + check_cv, + cross_val_score, + train_test_split, +) +from sklearn.model_selection._split import ( + _build_repr, + _validate_shuffle_split, + _yields_constant_splits, +) +from sklearn.svm import SVC +from sklearn.tests.metadata_routing_common import assert_request_is_empty +from sklearn.utils._array_api import ( + _convert_to_numpy, + get_namespace, + yield_namespace_device_dtype_combinations, +) +from sklearn.utils._array_api import ( + device as array_api_device, +) +from sklearn.utils._mocking import MockDataFrame +from sklearn.utils._testing import ( + assert_allclose, + assert_array_almost_equal, + assert_array_equal, + ignore_warnings, +) +from sklearn.utils.estimator_checks import ( + _array_api_for_tests, +) +from sklearn.utils.fixes import COO_CONTAINERS, CSC_CONTAINERS, CSR_CONTAINERS +from sklearn.utils.validation import _num_samples + +NO_GROUP_SPLITTERS = [ + KFold(), + StratifiedKFold(), + TimeSeriesSplit(), + LeaveOneOut(), + LeavePOut(p=2), + ShuffleSplit(), + StratifiedShuffleSplit(test_size=0.5), + PredefinedSplit([1, 1, 2, 2]), + RepeatedKFold(), + RepeatedStratifiedKFold(), +] + +GROUP_SPLITTERS = [ + GroupKFold(), + LeavePGroupsOut(n_groups=1), + StratifiedGroupKFold(), + LeaveOneGroupOut(), + GroupShuffleSplit(), +] + +ALL_SPLITTERS = NO_GROUP_SPLITTERS + GROUP_SPLITTERS # type: ignore + +X = np.ones(10) +y = np.arange(10) // 2 +test_groups = ( + np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]), + np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]), + np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]), + np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]), + [1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3], + ["1", "1", "1", "1", "2", "2", "2", "3", "3", "3", "3", "3"], +) +digits = load_digits() + + +@ignore_warnings +def test_cross_validator_with_default_params(): + n_samples = 4 + n_unique_groups = 4 + n_splits = 2 + p = 2 + n_shuffle_splits = 10 # (the default value) + + X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]]) + X_1d = np.array([1, 2, 3, 4]) + y = np.array([1, 1, 2, 2]) + groups = np.array([1, 2, 3, 4]) + loo = LeaveOneOut() + lpo = LeavePOut(p) + kf = KFold(n_splits) + skf = StratifiedKFold(n_splits) + lolo = LeaveOneGroupOut() + lopo = LeavePGroupsOut(p) + ss = ShuffleSplit(random_state=0) + ps = PredefinedSplit([1, 1, 2, 2]) # n_splits = np of unique folds = 2 + sgkf = StratifiedGroupKFold(n_splits) + + loo_repr = "LeaveOneOut()" 
+ lpo_repr = "LeavePOut(p=2)" + kf_repr = "KFold(n_splits=2, random_state=None, shuffle=False)" + skf_repr = "StratifiedKFold(n_splits=2, random_state=None, shuffle=False)" + lolo_repr = "LeaveOneGroupOut()" + lopo_repr = "LeavePGroupsOut(n_groups=2)" + ss_repr = ( + "ShuffleSplit(n_splits=10, random_state=0, test_size=None, train_size=None)" + ) + ps_repr = "PredefinedSplit(test_fold=array([1, 1, 2, 2]))" + sgkf_repr = "StratifiedGroupKFold(n_splits=2, random_state=None, shuffle=False)" + + n_splits_expected = [ + n_samples, + comb(n_samples, p), + n_splits, + n_splits, + n_unique_groups, + comb(n_unique_groups, p), + n_shuffle_splits, + 2, + n_splits, + ] + + for i, (cv, cv_repr) in enumerate( + zip( + [loo, lpo, kf, skf, lolo, lopo, ss, ps, sgkf], + [ + loo_repr, + lpo_repr, + kf_repr, + skf_repr, + lolo_repr, + lopo_repr, + ss_repr, + ps_repr, + sgkf_repr, + ], + ) + ): + # Test if get_n_splits works correctly + assert n_splits_expected[i] == cv.get_n_splits(X, y, groups) + + # Test if the cross-validator works as expected even if + # the data is 1d + np.testing.assert_equal( + list(cv.split(X, y, groups)), list(cv.split(X_1d, y, groups)) + ) + # Test that train, test indices returned are integers + for train, test in cv.split(X, y, groups): + assert np.asarray(train).dtype.kind == "i" + assert np.asarray(test).dtype.kind == "i" + + # Test if the repr works without any errors + assert cv_repr == repr(cv) + + # ValueError for get_n_splits methods + msg = "The 'X' parameter should not be None." + with pytest.raises(ValueError, match=msg): + loo.get_n_splits(None, y, groups) + with pytest.raises(ValueError, match=msg): + lpo.get_n_splits(None, y, groups) + + +def test_2d_y(): + # smoke test for 2d y and multi-label + n_samples = 30 + rng = np.random.RandomState(1) + X = rng.randint(0, 3, size=(n_samples, 2)) + y = rng.randint(0, 3, size=(n_samples,)) + y_2d = y.reshape(-1, 1) + y_multilabel = rng.randint(0, 2, size=(n_samples, 3)) + groups = rng.randint(0, 3, size=(n_samples,)) + splitters = [ + LeaveOneOut(), + LeavePOut(p=2), + KFold(), + StratifiedKFold(), + RepeatedKFold(), + RepeatedStratifiedKFold(), + StratifiedGroupKFold(), + ShuffleSplit(), + StratifiedShuffleSplit(test_size=0.5), + GroupShuffleSplit(), + LeaveOneGroupOut(), + LeavePGroupsOut(n_groups=2), + GroupKFold(n_splits=3), + TimeSeriesSplit(), + PredefinedSplit(test_fold=groups), + ] + for splitter in splitters: + list(splitter.split(X, y, groups)) + list(splitter.split(X, y_2d, groups)) + try: + list(splitter.split(X, y_multilabel, groups)) + except ValueError as e: + allowed_target_types = ("binary", "multiclass") + msg = "Supported target types are: {}. 
Got 'multilabel".format( + allowed_target_types + ) + assert msg in str(e) + + +def check_valid_split(train, test, n_samples=None): + # Use python sets to get more informative assertion failure messages + train, test = set(train), set(test) + + # Train and test split should not overlap + assert train.intersection(test) == set() + + if n_samples is not None: + # Check that the union of train an test split cover all the indices + assert train.union(test) == set(range(n_samples)) + + +def check_cv_coverage(cv, X, y, groups, expected_n_splits): + n_samples = _num_samples(X) + # Check that a all the samples appear at least once in a test fold + assert cv.get_n_splits(X, y, groups) == expected_n_splits + + collected_test_samples = set() + iterations = 0 + for train, test in cv.split(X, y, groups): + check_valid_split(train, test, n_samples=n_samples) + iterations += 1 + collected_test_samples.update(test) + + # Check that the accumulated test samples cover the whole dataset + assert iterations == expected_n_splits + if n_samples is not None: + assert collected_test_samples == set(range(n_samples)) + + +def test_kfold_valueerrors(): + X1 = np.array([[1, 2], [3, 4], [5, 6]]) + X2 = np.array([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]]) + # Check that errors are raised if there is not enough samples + (ValueError, next, KFold(4).split(X1)) + + # Check that a warning is raised if the least populated class has too few + # members. + y = np.array([3, 3, -1, -1, 3]) + + skf_3 = StratifiedKFold(3) + with pytest.warns(Warning, match="The least populated class"): + next(skf_3.split(X2, y)) + + sgkf_3 = StratifiedGroupKFold(3) + naive_groups = np.arange(len(y)) + with pytest.warns(Warning, match="The least populated class"): + next(sgkf_3.split(X2, y, naive_groups)) + + # Check that despite the warning the folds are still computed even + # though all the classes are not necessarily represented at on each + # side of the split at each split + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + check_cv_coverage(skf_3, X2, y, groups=None, expected_n_splits=3) + + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + check_cv_coverage(sgkf_3, X2, y, groups=naive_groups, expected_n_splits=3) + + # Check that errors are raised if all n_groups for individual + # classes are less than n_splits. 
+ y = np.array([3, 3, -1, -1, 2]) + + with pytest.raises(ValueError): + next(skf_3.split(X2, y)) + with pytest.raises(ValueError): + next(sgkf_3.split(X2, y)) + + # Error when number of folds is <= 1 + with pytest.raises(ValueError): + KFold(0) + with pytest.raises(ValueError): + KFold(1) + error_string = "k-fold cross-validation requires at least one train/test split" + with pytest.raises(ValueError, match=error_string): + StratifiedKFold(0) + with pytest.raises(ValueError, match=error_string): + StratifiedKFold(1) + with pytest.raises(ValueError, match=error_string): + StratifiedGroupKFold(0) + with pytest.raises(ValueError, match=error_string): + StratifiedGroupKFold(1) + + # When n_splits is not integer: + with pytest.raises(ValueError): + KFold(1.5) + with pytest.raises(ValueError): + KFold(2.0) + with pytest.raises(ValueError): + StratifiedKFold(1.5) + with pytest.raises(ValueError): + StratifiedKFold(2.0) + with pytest.raises(ValueError): + StratifiedGroupKFold(1.5) + with pytest.raises(ValueError): + StratifiedGroupKFold(2.0) + + # When shuffle is not a bool: + with pytest.raises(TypeError): + KFold(n_splits=4, shuffle=None) + + +def test_kfold_indices(): + # Check all indices are returned in the test folds + X1 = np.ones(18) + kf = KFold(3) + check_cv_coverage(kf, X1, y=None, groups=None, expected_n_splits=3) + + # Check all indices are returned in the test folds even when equal-sized + # folds are not possible + X2 = np.ones(17) + kf = KFold(3) + check_cv_coverage(kf, X2, y=None, groups=None, expected_n_splits=3) + + # Check if get_n_splits returns the number of folds + assert 5 == KFold(5).get_n_splits(X2) + + +def test_kfold_no_shuffle(): + # Manually check that KFold preserves the data ordering on toy datasets + X2 = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]] + + splits = KFold(2).split(X2[:-1]) + train, test = next(splits) + assert_array_equal(test, [0, 1]) + assert_array_equal(train, [2, 3]) + + train, test = next(splits) + assert_array_equal(test, [2, 3]) + assert_array_equal(train, [0, 1]) + + splits = KFold(2).split(X2) + train, test = next(splits) + assert_array_equal(test, [0, 1, 2]) + assert_array_equal(train, [3, 4]) + + train, test = next(splits) + assert_array_equal(test, [3, 4]) + assert_array_equal(train, [0, 1, 2]) + + +def test_stratified_kfold_no_shuffle(): + # Manually check that StratifiedKFold preserves the data ordering as much + # as possible on toy datasets in order to avoid hiding sample dependencies + # when possible + X, y = np.ones(4), [1, 1, 0, 0] + splits = StratifiedKFold(2).split(X, y) + train, test = next(splits) + assert_array_equal(test, [0, 2]) + assert_array_equal(train, [1, 3]) + + train, test = next(splits) + assert_array_equal(test, [1, 3]) + assert_array_equal(train, [0, 2]) + + X, y = np.ones(7), [1, 1, 1, 0, 0, 0, 0] + splits = StratifiedKFold(2).split(X, y) + train, test = next(splits) + assert_array_equal(test, [0, 1, 3, 4]) + assert_array_equal(train, [2, 5, 6]) + + train, test = next(splits) + assert_array_equal(test, [2, 5, 6]) + assert_array_equal(train, [0, 1, 3, 4]) + + # Check if get_n_splits returns the number of folds + assert 5 == StratifiedKFold(5).get_n_splits(X, y) + + # Make sure string labels are also supported + X = np.ones(7) + y1 = ["1", "1", "1", "0", "0", "0", "0"] + y2 = [1, 1, 1, 0, 0, 0, 0] + np.testing.assert_equal( + list(StratifiedKFold(2).split(X, y1)), list(StratifiedKFold(2).split(X, y2)) + ) + + # Check equivalence to KFold + y = [0, 1, 0, 1, 0, 1, 0, 1] + X = np.ones_like(y) + np.testing.assert_equal( + 
list(StratifiedKFold(3).split(X, y)), list(KFold(3).split(X, y)) + ) + + +@pytest.mark.parametrize("shuffle", [False, True]) +@pytest.mark.parametrize("k", [4, 5, 6, 7, 8, 9, 10]) +@pytest.mark.parametrize("kfold", [StratifiedKFold, StratifiedGroupKFold]) +def test_stratified_kfold_ratios(k, shuffle, kfold): + # Check that stratified kfold preserves class ratios in individual splits + # Repeat with shuffling turned off and on + n_samples = 1000 + X = np.ones(n_samples) + y = np.array( + [4] * int(0.10 * n_samples) + + [0] * int(0.89 * n_samples) + + [1] * int(0.01 * n_samples) + ) + # ensure perfect stratification with StratifiedGroupKFold + groups = np.arange(len(y)) + distr = np.bincount(y) / len(y) + + test_sizes = [] + random_state = None if not shuffle else 0 + skf = kfold(k, random_state=random_state, shuffle=shuffle) + for train, test in skf.split(X, y, groups=groups): + assert_allclose(np.bincount(y[train]) / len(train), distr, atol=0.02) + assert_allclose(np.bincount(y[test]) / len(test), distr, atol=0.02) + test_sizes.append(len(test)) + assert np.ptp(test_sizes) <= 1 + + +@pytest.mark.parametrize("shuffle", [False, True]) +@pytest.mark.parametrize("k", [4, 6, 7]) +@pytest.mark.parametrize("kfold", [StratifiedKFold, StratifiedGroupKFold]) +def test_stratified_kfold_label_invariance(k, shuffle, kfold): + # Check that stratified kfold gives the same indices regardless of labels + n_samples = 100 + y = np.array( + [2] * int(0.10 * n_samples) + + [0] * int(0.89 * n_samples) + + [1] * int(0.01 * n_samples) + ) + X = np.ones(len(y)) + # ensure perfect stratification with StratifiedGroupKFold + groups = np.arange(len(y)) + + def get_splits(y): + random_state = None if not shuffle else 0 + return [ + (list(train), list(test)) + for train, test in kfold( + k, random_state=random_state, shuffle=shuffle + ).split(X, y, groups=groups) + ] + + splits_base = get_splits(y) + for perm in permutations([0, 1, 2]): + y_perm = np.take(perm, y) + splits_perm = get_splits(y_perm) + assert splits_perm == splits_base + + +def test_kfold_balance(): + # Check that KFold returns folds with balanced sizes + for i in range(11, 17): + kf = KFold(5).split(X=np.ones(i)) + sizes = [len(test) for _, test in kf] + + assert (np.max(sizes) - np.min(sizes)) <= 1 + assert np.sum(sizes) == i + + +@pytest.mark.parametrize("kfold", [StratifiedKFold, StratifiedGroupKFold]) +def test_stratifiedkfold_balance(kfold): + # Check that KFold returns folds with balanced sizes (only when + # stratification is possible) + # Repeat with shuffling turned off and on + X = np.ones(17) + y = [0] * 3 + [1] * 14 + # ensure perfect stratification with StratifiedGroupKFold + groups = np.arange(len(y)) + + for shuffle in (True, False): + cv = kfold(3, shuffle=shuffle) + for i in range(11, 17): + skf = cv.split(X[:i], y[:i], groups[:i]) + sizes = [len(test) for _, test in skf] + + assert (np.max(sizes) - np.min(sizes)) <= 1 + assert np.sum(sizes) == i + + +def test_shuffle_kfold(): + # Check the indices are shuffled properly + kf = KFold(3) + kf2 = KFold(3, shuffle=True, random_state=0) + kf3 = KFold(3, shuffle=True, random_state=1) + + X = np.ones(300) + + all_folds = np.zeros(300) + for (tr1, te1), (tr2, te2), (tr3, te3) in zip( + kf.split(X), kf2.split(X), kf3.split(X) + ): + for tr_a, tr_b in combinations((tr1, tr2, tr3), 2): + # Assert that there is no complete overlap + assert len(np.intersect1d(tr_a, tr_b)) != len(tr1) + + # Set all test indices in successive iterations of kf2 to 1 + all_folds[te2] = 1 + + # Check that all indices are 
returned in the different test folds + assert sum(all_folds) == 300 + + +@pytest.mark.parametrize("kfold", [KFold, StratifiedKFold, StratifiedGroupKFold]) +def test_shuffle_kfold_stratifiedkfold_reproducibility(kfold): + X = np.ones(15) # Divisible by 3 + y = [0] * 7 + [1] * 8 + groups_1 = np.arange(len(y)) + X2 = np.ones(16) # Not divisible by 3 + y2 = [0] * 8 + [1] * 8 + groups_2 = np.arange(len(y2)) + + # Check that when the shuffle is True, multiple split calls produce the + # same split when random_state is int + kf = kfold(3, shuffle=True, random_state=0) + + np.testing.assert_equal( + list(kf.split(X, y, groups_1)), list(kf.split(X, y, groups_1)) + ) + + # Check that when the shuffle is True, multiple split calls often + # (not always) produce different splits when random_state is + # RandomState instance or None + kf = kfold(3, shuffle=True, random_state=np.random.RandomState(0)) + for data in zip((X, X2), (y, y2), (groups_1, groups_2)): + # Test if the two splits are different cv + for (_, test_a), (_, test_b) in zip(kf.split(*data), kf.split(*data)): + # cv.split(...) returns an array of tuples, each tuple + # consisting of an array with train indices and test indices + # Ensure that the splits for data are not same + # when random state is not set + with pytest.raises(AssertionError): + np.testing.assert_array_equal(test_a, test_b) + + +def test_shuffle_stratifiedkfold(): + # Check that shuffling is happening when requested, and for proper + # sample coverage + X_40 = np.ones(40) + y = [0] * 20 + [1] * 20 + kf0 = StratifiedKFold(5, shuffle=True, random_state=0) + kf1 = StratifiedKFold(5, shuffle=True, random_state=1) + for (_, test0), (_, test1) in zip(kf0.split(X_40, y), kf1.split(X_40, y)): + assert set(test0) != set(test1) + check_cv_coverage(kf0, X_40, y, groups=None, expected_n_splits=5) + + # Ensure that we shuffle each class's samples with different + # random_state in StratifiedKFold + # See https://github.com/scikit-learn/scikit-learn/pull/13124 + X = np.arange(10) + y = [0] * 5 + [1] * 5 + kf1 = StratifiedKFold(5, shuffle=True, random_state=0) + kf2 = StratifiedKFold(5, shuffle=True, random_state=1) + test_set1 = sorted([tuple(s[1]) for s in kf1.split(X, y)]) + test_set2 = sorted([tuple(s[1]) for s in kf2.split(X, y)]) + assert test_set1 != test_set2 + + +def test_kfold_can_detect_dependent_samples_on_digits(): # see #2372 + # The digits samples are dependent: they are apparently grouped by authors + # although we don't have any information on the groups segment locations + # for this data. We can highlight this fact by computing k-fold cross- + # validation with and without shuffling: we observe that the shuffling case + # wrongly makes the IID assumption and is therefore too optimistic: it + # estimates a much higher accuracy (around 0.93) than that the non + # shuffling variant (around 0.81). 
+ + X, y = digits.data[:600], digits.target[:600] + model = SVC(C=10, gamma=0.005) + + n_splits = 3 + + cv = KFold(n_splits=n_splits, shuffle=False) + mean_score = cross_val_score(model, X, y, cv=cv).mean() + assert 0.92 > mean_score + assert mean_score > 0.80 + + # Shuffling the data artificially breaks the dependency and hides the + # overfitting of the model with regards to the writing style of the authors + # by yielding a seriously overestimated score: + + cv = KFold(n_splits, shuffle=True, random_state=0) + mean_score = cross_val_score(model, X, y, cv=cv).mean() + assert mean_score > 0.92 + + cv = KFold(n_splits, shuffle=True, random_state=1) + mean_score = cross_val_score(model, X, y, cv=cv).mean() + assert mean_score > 0.92 + + # Similarly, StratifiedKFold should try to shuffle the data as little + # as possible (while respecting the balanced class constraints) + # and thus be able to detect the dependency by not overestimating + # the CV score either. As the digits dataset is approximately balanced + # the estimated mean score is close to the score measured with + # non-shuffled KFold + + cv = StratifiedKFold(n_splits) + mean_score = cross_val_score(model, X, y, cv=cv).mean() + assert 0.94 > mean_score + assert mean_score > 0.80 + + +def test_stratified_group_kfold_trivial(): + sgkf = StratifiedGroupKFold(n_splits=3) + # Trivial example - groups with the same distribution + y = np.array([1] * 6 + [0] * 12) + X = np.ones_like(y).reshape(-1, 1) + groups = np.asarray((1, 2, 3, 4, 5, 6, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6)) + distr = np.bincount(y) / len(y) + test_sizes = [] + for train, test in sgkf.split(X, y, groups): + # check group constraint + assert np.intersect1d(groups[train], groups[test]).size == 0 + # check y distribution + assert_allclose(np.bincount(y[train]) / len(train), distr, atol=0.02) + assert_allclose(np.bincount(y[test]) / len(test), distr, atol=0.02) + test_sizes.append(len(test)) + assert np.ptp(test_sizes) <= 1 + + +def test_stratified_group_kfold_approximate(): + # Not perfect stratification (even though it is possible) because of + # iteration over groups + sgkf = StratifiedGroupKFold(n_splits=3) + y = np.array([1] * 6 + [0] * 12) + X = np.ones_like(y).reshape(-1, 1) + groups = np.array([1, 2, 3, 3, 4, 4, 1, 1, 2, 2, 3, 4, 5, 5, 5, 6, 6, 6]) + expected = np.asarray([[0.833, 0.166], [0.666, 0.333], [0.5, 0.5]]) + test_sizes = [] + for (train, test), expect_dist in zip(sgkf.split(X, y, groups), expected): + # check group constraint + assert np.intersect1d(groups[train], groups[test]).size == 0 + split_dist = np.bincount(y[test]) / len(test) + assert_allclose(split_dist, expect_dist, atol=0.001) + test_sizes.append(len(test)) + assert np.ptp(test_sizes) <= 1 + + +@pytest.mark.parametrize( + "y, groups, expected", + [ + ( + np.array([0] * 6 + [1] * 6), + np.array([1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6]), + np.asarray([[0.5, 0.5], [0.5, 0.5], [0.5, 0.5]]), + ), + ( + np.array([0] * 9 + [1] * 3), + np.array([1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 5, 6]), + np.asarray([[0.75, 0.25], [0.75, 0.25], [0.75, 0.25]]), + ), + ], +) +def test_stratified_group_kfold_homogeneous_groups(y, groups, expected): + sgkf = StratifiedGroupKFold(n_splits=3) + X = np.ones_like(y).reshape(-1, 1) + for (train, test), expect_dist in zip(sgkf.split(X, y, groups), expected): + # check group constraint + assert np.intersect1d(groups[train], groups[test]).size == 0 + split_dist = np.bincount(y[test]) / len(test) + assert_allclose(split_dist, expect_dist, atol=0.001) + + 
+@pytest.mark.parametrize("cls_distr", [(0.4, 0.6), (0.3, 0.7), (0.2, 0.8), (0.8, 0.2)]) +@pytest.mark.parametrize("n_groups", [5, 30, 70]) +def test_stratified_group_kfold_against_group_kfold(cls_distr, n_groups): + # Check that given sufficient amount of samples StratifiedGroupKFold + # produces better stratified folds than regular GroupKFold + n_splits = 5 + sgkf = StratifiedGroupKFold(n_splits=n_splits) + gkf = GroupKFold(n_splits=n_splits) + rng = np.random.RandomState(0) + n_points = 1000 + y = rng.choice(2, size=n_points, p=cls_distr) + X = np.ones_like(y).reshape(-1, 1) + g = rng.choice(n_groups, n_points) + sgkf_folds = sgkf.split(X, y, groups=g) + gkf_folds = gkf.split(X, y, groups=g) + sgkf_entr = 0 + gkf_entr = 0 + for (sgkf_train, sgkf_test), (_, gkf_test) in zip(sgkf_folds, gkf_folds): + # check group constraint + assert np.intersect1d(g[sgkf_train], g[sgkf_test]).size == 0 + sgkf_distr = np.bincount(y[sgkf_test]) / len(sgkf_test) + gkf_distr = np.bincount(y[gkf_test]) / len(gkf_test) + sgkf_entr += stats.entropy(sgkf_distr, qk=cls_distr) + gkf_entr += stats.entropy(gkf_distr, qk=cls_distr) + sgkf_entr /= n_splits + gkf_entr /= n_splits + assert sgkf_entr <= gkf_entr + + +def test_shuffle_split(): + ss1 = ShuffleSplit(test_size=0.2, random_state=0).split(X) + ss2 = ShuffleSplit(test_size=2, random_state=0).split(X) + ss3 = ShuffleSplit(test_size=np.int32(2), random_state=0).split(X) + ss4 = ShuffleSplit(test_size=int(2), random_state=0).split(X) + for t1, t2, t3, t4 in zip(ss1, ss2, ss3, ss4): + assert_array_equal(t1[0], t2[0]) + assert_array_equal(t2[0], t3[0]) + assert_array_equal(t3[0], t4[0]) + assert_array_equal(t1[1], t2[1]) + assert_array_equal(t2[1], t3[1]) + assert_array_equal(t3[1], t4[1]) + + +@pytest.mark.parametrize("split_class", [ShuffleSplit, StratifiedShuffleSplit]) +@pytest.mark.parametrize( + "train_size, exp_train, exp_test", [(None, 9, 1), (8, 8, 2), (0.8, 8, 2)] +) +def test_shuffle_split_default_test_size(split_class, train_size, exp_train, exp_test): + # Check that the default value has the expected behavior, i.e. 0.1 if both + # unspecified or complement train_size unless both are specified. + X = np.ones(10) + y = np.ones(10) + + X_train, X_test = next(split_class(train_size=train_size).split(X, y)) + + assert len(X_train) == exp_train + assert len(X_test) == exp_test + + +@pytest.mark.parametrize( + "train_size, exp_train, exp_test", [(None, 8, 2), (7, 7, 3), (0.7, 7, 3)] +) +def test_group_shuffle_split_default_test_size(train_size, exp_train, exp_test): + # Check that the default value has the expected behavior, i.e. 0.2 if both + # unspecified or complement train_size unless both are specified. 
+ X = np.ones(10) + y = np.ones(10) + groups = range(10) + + X_train, X_test = next(GroupShuffleSplit(train_size=train_size).split(X, y, groups)) + + assert len(X_train) == exp_train + assert len(X_test) == exp_test + + +@ignore_warnings +def test_stratified_shuffle_split_init(): + X = np.arange(7) + y = np.asarray([0, 1, 1, 1, 2, 2, 2]) + # Check that error is raised if there is a class with only one sample + with pytest.raises(ValueError): + next(StratifiedShuffleSplit(3, test_size=0.2).split(X, y)) + + # Check that error is raised if the test set size is smaller than n_classes + with pytest.raises(ValueError): + next(StratifiedShuffleSplit(3, test_size=2).split(X, y)) + # Check that error is raised if the train set size is smaller than + # n_classes + with pytest.raises(ValueError): + next(StratifiedShuffleSplit(3, test_size=3, train_size=2).split(X, y)) + + X = np.arange(9) + y = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2]) + + # Train size or test size too small + with pytest.raises(ValueError): + next(StratifiedShuffleSplit(train_size=2).split(X, y)) + with pytest.raises(ValueError): + next(StratifiedShuffleSplit(test_size=2).split(X, y)) + + +def test_stratified_shuffle_split_respects_test_size(): + y = np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]) + test_size = 5 + train_size = 10 + sss = StratifiedShuffleSplit( + 6, test_size=test_size, train_size=train_size, random_state=0 + ).split(np.ones(len(y)), y) + for train, test in sss: + assert len(train) == train_size + assert len(test) == test_size + + +def test_stratified_shuffle_split_iter(): + ys = [ + np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]), + np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]), + np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2] * 2), + np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]), + np.array([-1] * 800 + [1] * 50), + np.concatenate([[i] * (100 + i) for i in range(11)]), + [1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3], + ["1", "1", "1", "1", "2", "2", "2", "3", "3", "3", "3", "3"], + ] + + for y in ys: + sss = StratifiedShuffleSplit(6, test_size=0.33, random_state=0).split( + np.ones(len(y)), y + ) + y = np.asanyarray(y) # To make it indexable for y[train] + # this is how test-size is computed internally + # in _validate_shuffle_split + test_size = np.ceil(0.33 * len(y)) + train_size = len(y) - test_size + for train, test in sss: + assert_array_equal(np.unique(y[train]), np.unique(y[test])) + # Checks if folds keep classes proportions + p_train = np.bincount(np.unique(y[train], return_inverse=True)[1]) / float( + len(y[train]) + ) + p_test = np.bincount(np.unique(y[test], return_inverse=True)[1]) / float( + len(y[test]) + ) + assert_array_almost_equal(p_train, p_test, 1) + assert len(train) + len(test) == y.size + assert len(train) == train_size + assert len(test) == test_size + assert_array_equal(np.intersect1d(train, test), []) + + +def test_stratified_shuffle_split_even(): + # Test the StratifiedShuffleSplit, indices are drawn with a + # equal chance + n_folds = 5 + n_splits = 1000 + + def assert_counts_are_ok(idx_counts, p): + # Here we test that the distribution of the counts + # per index is close enough to a binomial + threshold = 0.05 / n_splits + bf = stats.binom(n_splits, p) + for count in idx_counts: + prob = bf.pmf(count) + assert ( + prob > threshold + ), "An index is not drawn with chance corresponding to even draws" + + for n_samples in (6, 22): + groups = np.array((n_samples // 2) * [0, 1]) + splits = StratifiedShuffleSplit( + n_splits=n_splits, test_size=1.0 / n_folds, 
random_state=0 + ) + + train_counts = [0] * n_samples + test_counts = [0] * n_samples + n_splits_actual = 0 + for train, test in splits.split(X=np.ones(n_samples), y=groups): + n_splits_actual += 1 + for counter, ids in [(train_counts, train), (test_counts, test)]: + for id in ids: + counter[id] += 1 + assert n_splits_actual == n_splits + + n_train, n_test = _validate_shuffle_split( + n_samples, test_size=1.0 / n_folds, train_size=1.0 - (1.0 / n_folds) + ) + + assert len(train) == n_train + assert len(test) == n_test + assert len(set(train).intersection(test)) == 0 + + group_counts = np.unique(groups) + assert splits.test_size == 1.0 / n_folds + assert n_train + n_test == len(groups) + assert len(group_counts) == 2 + ex_test_p = float(n_test) / n_samples + ex_train_p = float(n_train) / n_samples + + assert_counts_are_ok(train_counts, ex_train_p) + assert_counts_are_ok(test_counts, ex_test_p) + + +def test_stratified_shuffle_split_overlap_train_test_bug(): + # See https://github.com/scikit-learn/scikit-learn/issues/6121 for + # the original bug report + y = [0, 1, 2, 3] * 3 + [4, 5] * 5 + X = np.ones_like(y) + + sss = StratifiedShuffleSplit(n_splits=1, test_size=0.5, random_state=0) + + train, test = next(sss.split(X=X, y=y)) + + # no overlap + assert_array_equal(np.intersect1d(train, test), []) + + # complete partition + assert_array_equal(np.union1d(train, test), np.arange(len(y))) + + +def test_stratified_shuffle_split_multilabel(): + # fix for issue 9037 + for y in [ + np.array([[0, 1], [1, 0], [1, 0], [0, 1]]), + np.array([[0, 1], [1, 1], [1, 1], [0, 1]]), + ]: + X = np.ones_like(y) + sss = StratifiedShuffleSplit(n_splits=1, test_size=0.5, random_state=0) + train, test = next(sss.split(X=X, y=y)) + y_train = y[train] + y_test = y[test] + + # no overlap + assert_array_equal(np.intersect1d(train, test), []) + + # complete partition + assert_array_equal(np.union1d(train, test), np.arange(len(y))) + + # correct stratification of entire rows + # (by design, here y[:, 0] uniquely determines the entire row of y) + expected_ratio = np.mean(y[:, 0]) + assert expected_ratio == np.mean(y_train[:, 0]) + assert expected_ratio == np.mean(y_test[:, 0]) + + +def test_stratified_shuffle_split_multilabel_many_labels(): + # fix in PR #9922: for multilabel data with > 1000 labels, str(row) + # truncates with an ellipsis for elements in positions 4 through + # len(row) - 4, so labels were not being correctly split using the powerset + # method for transforming a multilabel problem to a multiclass one; this + # test checks that this problem is fixed. + row_with_many_zeros = [1, 0, 1] + [0] * 1000 + [1, 0, 1] + row_with_many_ones = [1, 0, 1] + [1] * 1000 + [1, 0, 1] + y = np.array([row_with_many_zeros] * 10 + [row_with_many_ones] * 100) + X = np.ones_like(y) + + sss = StratifiedShuffleSplit(n_splits=1, test_size=0.5, random_state=0) + train, test = next(sss.split(X=X, y=y)) + y_train = y[train] + y_test = y[test] + + # correct stratification of entire rows + # (by design, here y[:, 4] uniquely determines the entire row of y) + expected_ratio = np.mean(y[:, 4]) + assert expected_ratio == np.mean(y_train[:, 4]) + assert expected_ratio == np.mean(y_test[:, 4]) + + +def test_predefinedsplit_with_kfold_split(): + # Check that PredefinedSplit can reproduce a split generated by Kfold. 
+ folds = np.full(10, -1.0) + kf_train = [] + kf_test = [] + for i, (train_ind, test_ind) in enumerate(KFold(5, shuffle=True).split(X)): + kf_train.append(train_ind) + kf_test.append(test_ind) + folds[test_ind] = i + ps = PredefinedSplit(folds) + # n_splits is simply the no of unique folds + assert len(np.unique(folds)) == ps.get_n_splits() + ps_train, ps_test = zip(*ps.split()) + assert_array_equal(ps_train, kf_train) + assert_array_equal(ps_test, kf_test) + + +def test_group_shuffle_split(): + for groups_i in test_groups: + X = y = np.ones(len(groups_i)) + n_splits = 6 + test_size = 1.0 / 3 + slo = GroupShuffleSplit(n_splits, test_size=test_size, random_state=0) + + # Make sure the repr works + repr(slo) + + # Test that the length is correct + assert slo.get_n_splits(X, y, groups=groups_i) == n_splits + + l_unique = np.unique(groups_i) + l = np.asarray(groups_i) + + for train, test in slo.split(X, y, groups=groups_i): + # First test: no train group is in the test set and vice versa + l_train_unique = np.unique(l[train]) + l_test_unique = np.unique(l[test]) + assert not np.any(np.isin(l[train], l_test_unique)) + assert not np.any(np.isin(l[test], l_train_unique)) + + # Second test: train and test add up to all the data + assert l[train].size + l[test].size == l.size + + # Third test: train and test are disjoint + assert_array_equal(np.intersect1d(train, test), []) + + # Fourth test: + # unique train and test groups are correct, +- 1 for rounding error + assert abs(len(l_test_unique) - round(test_size * len(l_unique))) <= 1 + assert ( + abs(len(l_train_unique) - round((1.0 - test_size) * len(l_unique))) <= 1 + ) + + +def test_leave_one_p_group_out(): + logo = LeaveOneGroupOut() + lpgo_1 = LeavePGroupsOut(n_groups=1) + lpgo_2 = LeavePGroupsOut(n_groups=2) + + # Make sure the repr works + assert repr(logo) == "LeaveOneGroupOut()" + assert repr(lpgo_1) == "LeavePGroupsOut(n_groups=1)" + assert repr(lpgo_2) == "LeavePGroupsOut(n_groups=2)" + assert repr(LeavePGroupsOut(n_groups=3)) == "LeavePGroupsOut(n_groups=3)" + + for j, (cv, p_groups_out) in enumerate(((logo, 1), (lpgo_1, 1), (lpgo_2, 2))): + for i, groups_i in enumerate(test_groups): + n_groups = len(np.unique(groups_i)) + n_splits = n_groups if p_groups_out == 1 else n_groups * (n_groups - 1) / 2 + X = y = np.ones(len(groups_i)) + + # Test that the length is correct + assert cv.get_n_splits(X, y, groups=groups_i) == n_splits + + groups_arr = np.asarray(groups_i) + + # Split using the original list / array / list of string groups_i + for train, test in cv.split(X, y, groups=groups_i): + # First test: no train group is in the test set and vice versa + assert_array_equal( + np.intersect1d(groups_arr[train], groups_arr[test]).tolist(), [] + ) + + # Second test: train and test add up to all the data + assert len(train) + len(test) == len(groups_i) + + # Third test: + # The number of groups in test must be equal to p_groups_out + assert np.unique(groups_arr[test]).shape[0], p_groups_out + + # check get_n_splits() with dummy parameters + assert logo.get_n_splits(None, None, ["a", "b", "c", "b", "c"]) == 3 + assert logo.get_n_splits(groups=[1.0, 1.1, 1.0, 1.2]) == 3 + assert lpgo_2.get_n_splits(None, None, np.arange(4)) == 6 + assert lpgo_1.get_n_splits(groups=np.arange(4)) == 4 + + # raise ValueError if a `groups` parameter is illegal + with pytest.raises(ValueError): + logo.get_n_splits(None, None, [0.0, np.nan, 0.0]) + with pytest.raises(ValueError): + lpgo_2.get_n_splits(None, None, [0.0, np.inf, 0.0]) + + msg = "The 'groups' parameter 
should not be None." + with pytest.raises(ValueError, match=msg): + logo.get_n_splits(None, None, None) + with pytest.raises(ValueError, match=msg): + lpgo_1.get_n_splits(None, None, None) + + +def test_leave_group_out_changing_groups(): + # Check that LeaveOneGroupOut and LeavePGroupsOut work normally if + # the groups variable is changed before calling split + groups = np.array([0, 1, 2, 1, 1, 2, 0, 0]) + X = np.ones(len(groups)) + groups_changing = np.array(groups, copy=True) + lolo = LeaveOneGroupOut().split(X, groups=groups) + lolo_changing = LeaveOneGroupOut().split(X, groups=groups) + lplo = LeavePGroupsOut(n_groups=2).split(X, groups=groups) + lplo_changing = LeavePGroupsOut(n_groups=2).split(X, groups=groups) + groups_changing[:] = 0 + for llo, llo_changing in [(lolo, lolo_changing), (lplo, lplo_changing)]: + for (train, test), (train_chan, test_chan) in zip(llo, llo_changing): + assert_array_equal(train, train_chan) + assert_array_equal(test, test_chan) + + # n_splits = no of 2 (p) group combinations of the unique groups = 3C2 = 3 + assert 3 == LeavePGroupsOut(n_groups=2).get_n_splits(X, y=X, groups=groups) + # n_splits = no of unique groups (C(uniq_lbls, 1) = n_unique_groups) + assert 3 == LeaveOneGroupOut().get_n_splits(X, y=X, groups=groups) + + +def test_leave_group_out_order_dependence(): + # Check that LeaveOneGroupOut orders the splits according to the index + # of the group left out. + groups = np.array([2, 2, 0, 0, 1, 1]) + X = np.ones(len(groups)) + + splits = iter(LeaveOneGroupOut().split(X, groups=groups)) + + expected_indices = [ + ([0, 1, 4, 5], [2, 3]), + ([0, 1, 2, 3], [4, 5]), + ([2, 3, 4, 5], [0, 1]), + ] + + for expected_train, expected_test in expected_indices: + train, test = next(splits) + assert_array_equal(train, expected_train) + assert_array_equal(test, expected_test) + + +def test_leave_one_p_group_out_error_on_fewer_number_of_groups(): + X = y = groups = np.ones(0) + msg = re.escape("Found array with 0 sample(s)") + with pytest.raises(ValueError, match=msg): + next(LeaveOneGroupOut().split(X, y, groups)) + + X = y = groups = np.ones(1) + msg = re.escape( + f"The groups parameter contains fewer than 2 unique groups ({groups})." + " LeaveOneGroupOut expects at least 2." + ) + with pytest.raises(ValueError, match=msg): + next(LeaveOneGroupOut().split(X, y, groups)) + + X = y = groups = np.ones(1) + msg = re.escape( + "The groups parameter contains fewer than (or equal to) n_groups " + f"(3) numbers of unique groups ({groups}). LeavePGroupsOut expects " + "that at least n_groups + 1 (4) unique groups " + "be present" + ) + with pytest.raises(ValueError, match=msg): + next(LeavePGroupsOut(n_groups=3).split(X, y, groups)) + + X = y = groups = np.arange(3) + msg = re.escape( + "The groups parameter contains fewer than (or equal to) n_groups " + f"(3) numbers of unique groups ({groups}). 
LeavePGroupsOut expects " + "that at least n_groups + 1 (4) unique groups " + "be present" + ) + with pytest.raises(ValueError, match=msg): + next(LeavePGroupsOut(n_groups=3).split(X, y, groups)) + + +@ignore_warnings +def test_repeated_cv_value_errors(): + # n_repeats is not integer or <= 0 + for cv in (RepeatedKFold, RepeatedStratifiedKFold): + with pytest.raises(ValueError): + cv(n_repeats=0) + with pytest.raises(ValueError): + cv(n_repeats=1.5) + + +@pytest.mark.parametrize("RepeatedCV", [RepeatedKFold, RepeatedStratifiedKFold]) +def test_repeated_cv_repr(RepeatedCV): + n_splits, n_repeats = 2, 6 + repeated_cv = RepeatedCV(n_splits=n_splits, n_repeats=n_repeats) + repeated_cv_repr = "{}(n_repeats=6, n_splits=2, random_state=None)".format( + repeated_cv.__class__.__name__ + ) + assert repeated_cv_repr == repr(repeated_cv) + + +def test_repeated_kfold_determinstic_split(): + X = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]] + random_state = 258173307 + rkf = RepeatedKFold(n_splits=2, n_repeats=2, random_state=random_state) + + # split should produce same and deterministic splits on + # each call + for _ in range(3): + splits = rkf.split(X) + train, test = next(splits) + assert_array_equal(train, [2, 4]) + assert_array_equal(test, [0, 1, 3]) + + train, test = next(splits) + assert_array_equal(train, [0, 1, 3]) + assert_array_equal(test, [2, 4]) + + train, test = next(splits) + assert_array_equal(train, [0, 1]) + assert_array_equal(test, [2, 3, 4]) + + train, test = next(splits) + assert_array_equal(train, [2, 3, 4]) + assert_array_equal(test, [0, 1]) + + with pytest.raises(StopIteration): + next(splits) + + +def test_get_n_splits_for_repeated_kfold(): + n_splits = 3 + n_repeats = 4 + rkf = RepeatedKFold(n_splits=n_splits, n_repeats=n_repeats) + expected_n_splits = n_splits * n_repeats + assert expected_n_splits == rkf.get_n_splits() + + +def test_get_n_splits_for_repeated_stratified_kfold(): + n_splits = 3 + n_repeats = 4 + rskf = RepeatedStratifiedKFold(n_splits=n_splits, n_repeats=n_repeats) + expected_n_splits = n_splits * n_repeats + assert expected_n_splits == rskf.get_n_splits() + + +def test_repeated_stratified_kfold_determinstic_split(): + X = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]] + y = [1, 1, 1, 0, 0] + random_state = 1944695409 + rskf = RepeatedStratifiedKFold(n_splits=2, n_repeats=2, random_state=random_state) + + # split should produce same and deterministic splits on + # each call + for _ in range(3): + splits = rskf.split(X, y) + train, test = next(splits) + assert_array_equal(train, [1, 4]) + assert_array_equal(test, [0, 2, 3]) + + train, test = next(splits) + assert_array_equal(train, [0, 2, 3]) + assert_array_equal(test, [1, 4]) + + train, test = next(splits) + assert_array_equal(train, [2, 3]) + assert_array_equal(test, [0, 1, 4]) + + train, test = next(splits) + assert_array_equal(train, [0, 1, 4]) + assert_array_equal(test, [2, 3]) + + with pytest.raises(StopIteration): + next(splits) + + +def test_train_test_split_errors(): + pytest.raises(ValueError, train_test_split) + + pytest.raises(ValueError, train_test_split, range(3), train_size=1.1) + + pytest.raises(ValueError, train_test_split, range(3), test_size=0.6, train_size=0.6) + pytest.raises( + ValueError, + train_test_split, + range(3), + test_size=np.float32(0.6), + train_size=np.float32(0.6), + ) + pytest.raises(ValueError, train_test_split, range(3), test_size="wrong_type") + pytest.raises(ValueError, train_test_split, range(3), test_size=2, train_size=4) + pytest.raises(TypeError, train_test_split, range(3), 
some_argument=1.1) + pytest.raises(ValueError, train_test_split, range(3), range(42)) + pytest.raises(ValueError, train_test_split, range(10), shuffle=False, stratify=True) + + with pytest.raises( + ValueError, + match=r"train_size=11 should be either positive and " + r"smaller than the number of samples 10 or a " + r"float in the \(0, 1\) range", + ): + train_test_split(range(10), train_size=11, test_size=1) + + +@pytest.mark.parametrize( + "train_size, exp_train, exp_test", [(None, 7, 3), (8, 8, 2), (0.8, 8, 2)] +) +def test_train_test_split_default_test_size(train_size, exp_train, exp_test): + # Check that the default value has the expected behavior, i.e. complement + # train_size unless both are specified. + X_train, X_test = train_test_split(X, train_size=train_size) + + assert len(X_train) == exp_train + assert len(X_test) == exp_test + + +@pytest.mark.parametrize( + "array_namespace, device, dtype_name", yield_namespace_device_dtype_combinations() +) +@pytest.mark.parametrize( + "shuffle,stratify", + ( + (True, None), + (True, np.hstack((np.ones(6), np.zeros(4)))), + # stratification only works with shuffling + (False, None), + ), +) +def test_array_api_train_test_split( + shuffle, stratify, array_namespace, device, dtype_name +): + xp = _array_api_for_tests(array_namespace, device) + + X = np.arange(100).reshape((10, 10)) + y = np.arange(10) + + X_np = X.astype(dtype_name) + X_xp = xp.asarray(X_np, device=device) + + y_np = y.astype(dtype_name) + y_xp = xp.asarray(y_np, device=device) + + X_train_np, X_test_np, y_train_np, y_test_np = train_test_split( + X_np, y, random_state=0, shuffle=shuffle, stratify=stratify + ) + with config_context(array_api_dispatch=True): + if stratify is not None: + stratify_xp = xp.asarray(stratify) + else: + stratify_xp = stratify + X_train_xp, X_test_xp, y_train_xp, y_test_xp = train_test_split( + X_xp, y_xp, shuffle=shuffle, stratify=stratify_xp, random_state=0 + ) + + # Check that namespace is preserved, has to happen with + # array_api_dispatch enabled. 
+ assert get_namespace(X_train_xp)[0] == get_namespace(X_xp)[0] + assert get_namespace(X_test_xp)[0] == get_namespace(X_xp)[0] + assert get_namespace(y_train_xp)[0] == get_namespace(y_xp)[0] + assert get_namespace(y_test_xp)[0] == get_namespace(y_xp)[0] + + # Check device and dtype is preserved on output + assert array_api_device(X_train_xp) == array_api_device(X_xp) + assert array_api_device(y_train_xp) == array_api_device(y_xp) + assert array_api_device(X_test_xp) == array_api_device(X_xp) + assert array_api_device(y_test_xp) == array_api_device(y_xp) + + assert X_train_xp.dtype == X_xp.dtype + assert y_train_xp.dtype == y_xp.dtype + assert X_test_xp.dtype == X_xp.dtype + assert y_test_xp.dtype == y_xp.dtype + + assert_allclose( + _convert_to_numpy(X_train_xp, xp=xp), + X_train_np, + ) + assert_allclose( + _convert_to_numpy(X_test_xp, xp=xp), + X_test_np, + ) + + +@pytest.mark.parametrize("coo_container", COO_CONTAINERS) +def test_train_test_split(coo_container): + X = np.arange(100).reshape((10, 10)) + X_s = coo_container(X) + y = np.arange(10) + + # simple test + split = train_test_split(X, y, test_size=None, train_size=0.5) + X_train, X_test, y_train, y_test = split + assert len(y_test) == len(y_train) + # test correspondence of X and y + assert_array_equal(X_train[:, 0], y_train * 10) + assert_array_equal(X_test[:, 0], y_test * 10) + + # don't convert lists to anything else by default + split = train_test_split(X, X_s, y.tolist()) + X_train, X_test, X_s_train, X_s_test, y_train, y_test = split + assert isinstance(y_train, list) + assert isinstance(y_test, list) + + # allow nd-arrays + X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2) + y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11) + split = train_test_split(X_4d, y_3d) + assert split[0].shape == (7, 5, 3, 2) + assert split[1].shape == (3, 5, 3, 2) + assert split[2].shape == (7, 7, 11) + assert split[3].shape == (3, 7, 11) + + # test stratification option + y = np.array([1, 1, 1, 1, 2, 2, 2, 2]) + for test_size, exp_test_size in zip([2, 4, 0.25, 0.5, 0.75], [2, 4, 2, 4, 6]): + train, test = train_test_split( + y, test_size=test_size, stratify=y, random_state=0 + ) + assert len(test) == exp_test_size + assert len(test) + len(train) == len(y) + # check the 1:1 ratio of ones and twos in the data is preserved + assert np.sum(train == 1) == np.sum(train == 2) + + # test unshuffled split + y = np.arange(10) + for test_size in [2, 0.2]: + train, test = train_test_split(y, shuffle=False, test_size=test_size) + assert_array_equal(test, [8, 9]) + assert_array_equal(train, [0, 1, 2, 3, 4, 5, 6, 7]) + + +def test_train_test_split_32bit_overflow(): + """Check for integer overflow on 32-bit platforms. 
+ + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/20774 + """ + + # A number 'n' big enough for expression 'n * n * train_size' to cause + # an overflow for signed 32-bit integer + big_number = 100000 + + # Definition of 'y' is a part of reproduction - population for at least + # one class should be in the same order of magnitude as size of X + X = np.arange(big_number) + y = X > (0.99 * big_number) + + split = train_test_split(X, y, stratify=y, train_size=0.25) + X_train, X_test, y_train, y_test = split + + assert X_train.size + X_test.size == big_number + assert y_train.size + y_test.size == big_number + + +@ignore_warnings +def test_train_test_split_pandas(): + # check train_test_split doesn't destroy pandas dataframe + types = [MockDataFrame] + try: + from pandas import DataFrame + + types.append(DataFrame) + except ImportError: + pass + for InputFeatureType in types: + # X dataframe + X_df = InputFeatureType(X) + X_train, X_test = train_test_split(X_df) + assert isinstance(X_train, InputFeatureType) + assert isinstance(X_test, InputFeatureType) + + +@pytest.mark.parametrize( + "sparse_container", COO_CONTAINERS + CSC_CONTAINERS + CSR_CONTAINERS +) +def test_train_test_split_sparse(sparse_container): + # check that train_test_split converts scipy sparse matrices + # to csr, as stated in the documentation + X = np.arange(100).reshape((10, 10)) + X_s = sparse_container(X) + X_train, X_test = train_test_split(X_s) + assert issparse(X_train) and X_train.format == "csr" + assert issparse(X_test) and X_test.format == "csr" + + +def test_train_test_split_mock_pandas(): + # X mock dataframe + X_df = MockDataFrame(X) + X_train, X_test = train_test_split(X_df) + assert isinstance(X_train, MockDataFrame) + assert isinstance(X_test, MockDataFrame) + X_train_arr, X_test_arr = train_test_split(X_df) + + +def test_train_test_split_list_input(): + # Check that when y is a list / list of string labels, it works. + X = np.ones(7) + y1 = ["1"] * 4 + ["0"] * 3 + y2 = np.hstack((np.ones(4), np.zeros(3))) + y3 = y2.tolist() + + for stratify in (True, False): + X_train1, X_test1, y_train1, y_test1 = train_test_split( + X, y1, stratify=y1 if stratify else None, random_state=0 + ) + X_train2, X_test2, y_train2, y_test2 = train_test_split( + X, y2, stratify=y2 if stratify else None, random_state=0 + ) + X_train3, X_test3, y_train3, y_test3 = train_test_split( + X, y3, stratify=y3 if stratify else None, random_state=0 + ) + + np.testing.assert_equal(X_train1, X_train2) + np.testing.assert_equal(y_train2, y_train3) + np.testing.assert_equal(X_test1, X_test3) + np.testing.assert_equal(y_test3, y_test2) + + +@pytest.mark.parametrize( + "test_size, train_size", + [(2.0, None), (1.0, None), (0.1, 0.95), (None, 1j), (11, None), (10, None), (8, 3)], +) +def test_shufflesplit_errors(test_size, train_size): + with pytest.raises(ValueError): + next(ShuffleSplit(test_size=test_size, train_size=train_size).split(X)) + + +def test_shufflesplit_reproducible(): + # Check that iterating twice on the ShuffleSplit gives the same + # sequence of train-test when the random_state is given + ss = ShuffleSplit(random_state=21) + assert_array_equal([a for a, b in ss.split(X)], [a for a, b in ss.split(X)]) + + +def test_stratifiedshufflesplit_list_input(): + # Check that when y is a list / list of string labels, it works. 
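+ # All three y encodings below describe the same two strata (four samples
+ # labelled "1"/1.0 and three labelled "0"/0.0), so with a fixed
+ # random_state the generated splits are expected to be identical.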
+ sss = StratifiedShuffleSplit(test_size=2, random_state=42) + X = np.ones(7) + y1 = ["1"] * 4 + ["0"] * 3 + y2 = np.hstack((np.ones(4), np.zeros(3))) + y3 = y2.tolist() + + np.testing.assert_equal(list(sss.split(X, y1)), list(sss.split(X, y2))) + np.testing.assert_equal(list(sss.split(X, y3)), list(sss.split(X, y2))) + + +def test_train_test_split_allow_nans(): + # Check that train_test_split allows input data with NaNs + X = np.arange(200, dtype=np.float64).reshape(10, -1) + X[2, :] = np.nan + y = np.repeat([0, 1], X.shape[0] / 2) + train_test_split(X, y, test_size=0.2, random_state=42) + + +def test_check_cv(): + X = np.ones(9) + cv = check_cv(3, classifier=False) + # Use numpy.testing.assert_equal which recursively compares + # lists of lists + np.testing.assert_equal(list(KFold(3).split(X)), list(cv.split(X))) + + y_binary = np.array([0, 1, 0, 1, 0, 0, 1, 1, 1]) + cv = check_cv(3, y_binary, classifier=True) + np.testing.assert_equal( + list(StratifiedKFold(3).split(X, y_binary)), list(cv.split(X, y_binary)) + ) + + y_multiclass = np.array([0, 1, 0, 1, 2, 1, 2, 0, 2]) + cv = check_cv(3, y_multiclass, classifier=True) + np.testing.assert_equal( + list(StratifiedKFold(3).split(X, y_multiclass)), list(cv.split(X, y_multiclass)) + ) + # also works with 2d multiclass + y_multiclass_2d = y_multiclass.reshape(-1, 1) + cv = check_cv(3, y_multiclass_2d, classifier=True) + np.testing.assert_equal( + list(StratifiedKFold(3).split(X, y_multiclass_2d)), + list(cv.split(X, y_multiclass_2d)), + ) + + assert not np.all( + next(StratifiedKFold(3).split(X, y_multiclass_2d))[0] + == next(KFold(3).split(X, y_multiclass_2d))[0] + ) + + X = np.ones(5) + y_multilabel = np.array( + [[0, 0, 0, 0], [0, 1, 1, 0], [0, 0, 0, 1], [1, 1, 0, 1], [0, 0, 1, 0]] + ) + cv = check_cv(3, y_multilabel, classifier=True) + np.testing.assert_equal(list(KFold(3).split(X)), list(cv.split(X))) + + y_multioutput = np.array([[1, 2], [0, 3], [0, 0], [3, 1], [2, 0]]) + cv = check_cv(3, y_multioutput, classifier=True) + np.testing.assert_equal(list(KFold(3).split(X)), list(cv.split(X))) + + with pytest.raises(ValueError): + check_cv(cv="lolo") + + +def test_cv_iterable_wrapper(): + kf_iter = KFold().split(X, y) + kf_iter_wrapped = check_cv(kf_iter) + # Since the wrapped iterable is enlisted and stored, + # split can be called any number of times to produce + # consistent results. 
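+ # Note: check_cv wraps the raw generator in the internal _CVIterableWrapper,
+ # which stores the splits as a list, so the wrapper can be iterated
+ # repeatedly even though the original generator is single-use.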
+ np.testing.assert_equal( + list(kf_iter_wrapped.split(X, y)), list(kf_iter_wrapped.split(X, y)) + ) + # If the splits are randomized, successive calls to split yields different + # results + kf_randomized_iter = KFold(shuffle=True, random_state=0).split(X, y) + kf_randomized_iter_wrapped = check_cv(kf_randomized_iter) + # numpy's assert_array_equal properly compares nested lists + np.testing.assert_equal( + list(kf_randomized_iter_wrapped.split(X, y)), + list(kf_randomized_iter_wrapped.split(X, y)), + ) + + try: + splits_are_equal = True + np.testing.assert_equal( + list(kf_iter_wrapped.split(X, y)), + list(kf_randomized_iter_wrapped.split(X, y)), + ) + except AssertionError: + splits_are_equal = False + assert not splits_are_equal, ( + "If the splits are randomized, " + "successive calls to split should yield different results" + ) + + +@pytest.mark.parametrize("kfold", [GroupKFold, StratifiedGroupKFold]) +def test_group_kfold(kfold): + rng = np.random.RandomState(0) + + # Parameters of the test + n_groups = 15 + n_samples = 1000 + n_splits = 5 + + X = y = np.ones(n_samples) + + # Construct the test data + tolerance = 0.05 * n_samples # 5 percent error allowed + groups = rng.randint(0, n_groups, n_samples) + + ideal_n_groups_per_fold = n_samples // n_splits + + len(np.unique(groups)) + # Get the test fold indices from the test set indices of each fold + folds = np.zeros(n_samples) + lkf = kfold(n_splits=n_splits) + for i, (_, test) in enumerate(lkf.split(X, y, groups)): + folds[test] = i + + # Check that folds have approximately the same size + assert len(folds) == len(groups) + for i in np.unique(folds): + assert tolerance >= abs(sum(folds == i) - ideal_n_groups_per_fold) + + # Check that each group appears only in 1 fold + for group in np.unique(groups): + assert len(np.unique(folds[groups == group])) == 1 + + # Check that no group is on both sides of the split + groups = np.asarray(groups, dtype=object) + for train, test in lkf.split(X, y, groups): + assert len(np.intersect1d(groups[train], groups[test])) == 0 + + # Construct the test data + groups = np.array( + [ + "Albert", + "Jean", + "Bertrand", + "Michel", + "Jean", + "Francis", + "Robert", + "Michel", + "Rachel", + "Lois", + "Michelle", + "Bernard", + "Marion", + "Laura", + "Jean", + "Rachel", + "Franck", + "John", + "Gael", + "Anna", + "Alix", + "Robert", + "Marion", + "David", + "Tony", + "Abel", + "Becky", + "Madmood", + "Cary", + "Mary", + "Alexandre", + "David", + "Francis", + "Barack", + "Abdoul", + "Rasha", + "Xi", + "Silvia", + ] + ) + + n_groups = len(np.unique(groups)) + n_samples = len(groups) + n_splits = 5 + tolerance = 0.05 * n_samples # 5 percent error allowed + ideal_n_groups_per_fold = n_samples // n_splits + + X = y = np.ones(n_samples) + + # Get the test fold indices from the test set indices of each fold + folds = np.zeros(n_samples) + for i, (_, test) in enumerate(lkf.split(X, y, groups)): + folds[test] = i + + # Check that folds have approximately the same size + assert len(folds) == len(groups) + for i in np.unique(folds): + assert tolerance >= abs(sum(folds == i) - ideal_n_groups_per_fold) + + # Check that each group appears only in 1 fold + with warnings.catch_warnings(): + warnings.simplefilter("ignore", FutureWarning) + for group in np.unique(groups): + assert len(np.unique(folds[groups == group])) == 1 + + # Check that no group is on both sides of the split + groups = np.asarray(groups, dtype=object) + for train, test in lkf.split(X, y, groups): + assert len(np.intersect1d(groups[train], 
groups[test])) == 0 + + # groups can also be a list + cv_iter = list(lkf.split(X, y, groups.tolist())) + for (train1, test1), (train2, test2) in zip(lkf.split(X, y, groups), cv_iter): + assert_array_equal(train1, train2) + assert_array_equal(test1, test2) + + # Should fail if there are more folds than groups + groups = np.array([1, 1, 1, 2, 2]) + X = y = np.ones(len(groups)) + with pytest.raises(ValueError, match="Cannot have number of splits.*greater"): + next(GroupKFold(n_splits=3).split(X, y, groups)) + + +def test_time_series_cv(): + X = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14]] + + # Should fail if there are more folds than samples + with pytest.raises(ValueError, match="Cannot have number of folds.*greater"): + next(TimeSeriesSplit(n_splits=7).split(X)) + + tscv = TimeSeriesSplit(2) + + # Manually check that Time Series CV preserves the data + # ordering on toy datasets + splits = tscv.split(X[:-1]) + train, test = next(splits) + assert_array_equal(train, [0, 1]) + assert_array_equal(test, [2, 3]) + + train, test = next(splits) + assert_array_equal(train, [0, 1, 2, 3]) + assert_array_equal(test, [4, 5]) + + splits = TimeSeriesSplit(2).split(X) + + train, test = next(splits) + assert_array_equal(train, [0, 1, 2]) + assert_array_equal(test, [3, 4]) + + train, test = next(splits) + assert_array_equal(train, [0, 1, 2, 3, 4]) + assert_array_equal(test, [5, 6]) + + # Check get_n_splits returns the correct number of splits + splits = TimeSeriesSplit(2).split(X) + n_splits_actual = len(list(splits)) + assert n_splits_actual == tscv.get_n_splits() + assert n_splits_actual == 2 + + +def _check_time_series_max_train_size(splits, check_splits, max_train_size): + for (train, test), (check_train, check_test) in zip(splits, check_splits): + assert_array_equal(test, check_test) + assert len(check_train) <= max_train_size + suffix_start = max(len(train) - max_train_size, 0) + assert_array_equal(check_train, train[suffix_start:]) + + +def test_time_series_max_train_size(): + X = np.zeros((6, 1)) + splits = TimeSeriesSplit(n_splits=3).split(X) + check_splits = TimeSeriesSplit(n_splits=3, max_train_size=3).split(X) + _check_time_series_max_train_size(splits, check_splits, max_train_size=3) + + # Test for the case where the size of a fold is greater than max_train_size + check_splits = TimeSeriesSplit(n_splits=3, max_train_size=2).split(X) + _check_time_series_max_train_size(splits, check_splits, max_train_size=2) + + # Test for the case where the size of each fold is less than max_train_size + check_splits = TimeSeriesSplit(n_splits=3, max_train_size=5).split(X) + _check_time_series_max_train_size(splits, check_splits, max_train_size=2) + + +def test_time_series_test_size(): + X = np.zeros((10, 1)) + + # Test alone + splits = TimeSeriesSplit(n_splits=3, test_size=3).split(X) + + train, test = next(splits) + assert_array_equal(train, [0]) + assert_array_equal(test, [1, 2, 3]) + + train, test = next(splits) + assert_array_equal(train, [0, 1, 2, 3]) + assert_array_equal(test, [4, 5, 6]) + + train, test = next(splits) + assert_array_equal(train, [0, 1, 2, 3, 4, 5, 6]) + assert_array_equal(test, [7, 8, 9]) + + # Test with max_train_size + splits = TimeSeriesSplit(n_splits=2, test_size=2, max_train_size=4).split(X) + + train, test = next(splits) + assert_array_equal(train, [2, 3, 4, 5]) + assert_array_equal(test, [6, 7]) + + train, test = next(splits) + assert_array_equal(train, [4, 5, 6, 7]) + assert_array_equal(test, [8, 9]) + + # Should fail with not enough data points for configuration 
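+ # With n_splits=5 and test_size=2, the test folds alone would need
+ # 5 * 2 = 10 samples, leaving an empty training set for the first split
+ # on this 10-sample X, hence the error below.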
+ with pytest.raises(ValueError, match="Too many splits.*with test_size"): + splits = TimeSeriesSplit(n_splits=5, test_size=2).split(X) + next(splits) + + +def test_time_series_gap(): + X = np.zeros((10, 1)) + + # Test alone + splits = TimeSeriesSplit(n_splits=2, gap=2).split(X) + + train, test = next(splits) + assert_array_equal(train, [0, 1]) + assert_array_equal(test, [4, 5, 6]) + + train, test = next(splits) + assert_array_equal(train, [0, 1, 2, 3, 4]) + assert_array_equal(test, [7, 8, 9]) + + # Test with max_train_size + splits = TimeSeriesSplit(n_splits=3, gap=2, max_train_size=2).split(X) + + train, test = next(splits) + assert_array_equal(train, [0, 1]) + assert_array_equal(test, [4, 5]) + + train, test = next(splits) + assert_array_equal(train, [2, 3]) + assert_array_equal(test, [6, 7]) + + train, test = next(splits) + assert_array_equal(train, [4, 5]) + assert_array_equal(test, [8, 9]) + + # Test with test_size + splits = TimeSeriesSplit(n_splits=2, gap=2, max_train_size=4, test_size=2).split(X) + + train, test = next(splits) + assert_array_equal(train, [0, 1, 2, 3]) + assert_array_equal(test, [6, 7]) + + train, test = next(splits) + assert_array_equal(train, [2, 3, 4, 5]) + assert_array_equal(test, [8, 9]) + + # Test with additional test_size + splits = TimeSeriesSplit(n_splits=2, gap=2, test_size=3).split(X) + + train, test = next(splits) + assert_array_equal(train, [0, 1]) + assert_array_equal(test, [4, 5, 6]) + + train, test = next(splits) + assert_array_equal(train, [0, 1, 2, 3, 4]) + assert_array_equal(test, [7, 8, 9]) + + # Verify proper error is thrown + with pytest.raises(ValueError, match="Too many splits.*and gap"): + splits = TimeSeriesSplit(n_splits=4, gap=2).split(X) + next(splits) + + +def test_nested_cv(): + # Test if nested cross validation works with different combinations of cv + rng = np.random.RandomState(0) + + X, y = make_classification(n_samples=15, n_classes=2, random_state=0) + groups = rng.randint(0, 5, 15) + + cvs = [ + LeaveOneGroupOut(), + StratifiedKFold(n_splits=2), + LeaveOneOut(), + GroupKFold(n_splits=3), + StratifiedKFold(), + StratifiedGroupKFold(), + StratifiedShuffleSplit(n_splits=3, random_state=0), + ] + + for inner_cv, outer_cv in combinations_with_replacement(cvs, 2): + gs = GridSearchCV( + DummyClassifier(), + param_grid={"strategy": ["stratified", "most_frequent"]}, + cv=inner_cv, + error_score="raise", + ) + cross_val_score( + gs, X=X, y=y, groups=groups, cv=outer_cv, params={"groups": groups} + ) + + +def test_build_repr(): + class MockSplitter: + def __init__(self, a, b=0, c=None): + self.a = a + self.b = b + self.c = c + + def __repr__(self): + return _build_repr(self) + + assert repr(MockSplitter(5, 6)) == "MockSplitter(a=5, b=6, c=None)" + + +@pytest.mark.parametrize( + "CVSplitter", (ShuffleSplit, GroupShuffleSplit, StratifiedShuffleSplit) +) +def test_shuffle_split_empty_trainset(CVSplitter): + cv = CVSplitter(test_size=0.99) + X, y = [[1]], [0] # 1 sample + with pytest.raises( + ValueError, + match=( + "With n_samples=1, test_size=0.99 and train_size=None, " + "the resulting train set will be empty" + ), + ): + next(cv.split(X, y, groups=[1])) + + +def test_train_test_split_empty_trainset(): + (X,) = [[1]] # 1 sample + with pytest.raises( + ValueError, + match=( + "With n_samples=1, test_size=0.99 and train_size=None, " + "the resulting train set will be empty" + ), + ): + train_test_split(X, test_size=0.99) + + X = [[1], [1], [1]] # 3 samples, ask for more than 2 thirds + with pytest.raises( + ValueError, + match=( + "With 
n_samples=3, test_size=0.67 and train_size=None, " + "the resulting train set will be empty" + ), + ): + train_test_split(X, test_size=0.67) + + +def test_leave_one_out_empty_trainset(): + # LeaveOneGroup out expect at least 2 groups so no need to check + cv = LeaveOneOut() + X, y = [[1]], [0] # 1 sample + with pytest.raises(ValueError, match="Cannot perform LeaveOneOut with n_samples=1"): + next(cv.split(X, y)) + + +def test_leave_p_out_empty_trainset(): + # No need to check LeavePGroupsOut + cv = LeavePOut(p=2) + X, y = [[1], [2]], [0, 3] # 2 samples + with pytest.raises( + ValueError, match="p=2 must be strictly less than the number of samples=2" + ): + next(cv.split(X, y, groups=[1, 2])) + + +@pytest.mark.parametrize("Klass", (KFold, StratifiedKFold, StratifiedGroupKFold)) +def test_random_state_shuffle_false(Klass): + # passing a non-default random_state when shuffle=False makes no sense + with pytest.raises(ValueError, match="has no effect since shuffle is False"): + Klass(3, shuffle=False, random_state=0) + + +@pytest.mark.parametrize( + "cv, expected", + [ + (KFold(), True), + (KFold(shuffle=True, random_state=123), True), + (StratifiedKFold(), True), + (StratifiedKFold(shuffle=True, random_state=123), True), + (StratifiedGroupKFold(shuffle=True, random_state=123), True), + (StratifiedGroupKFold(), True), + (RepeatedKFold(random_state=123), True), + (RepeatedStratifiedKFold(random_state=123), True), + (ShuffleSplit(random_state=123), True), + (GroupShuffleSplit(random_state=123), True), + (StratifiedShuffleSplit(random_state=123), True), + (GroupKFold(), True), + (TimeSeriesSplit(), True), + (LeaveOneOut(), True), + (LeaveOneGroupOut(), True), + (LeavePGroupsOut(n_groups=2), True), + (LeavePOut(p=2), True), + (KFold(shuffle=True, random_state=None), False), + (KFold(shuffle=True, random_state=None), False), + (StratifiedKFold(shuffle=True, random_state=np.random.RandomState(0)), False), + (StratifiedKFold(shuffle=True, random_state=np.random.RandomState(0)), False), + (RepeatedKFold(random_state=None), False), + (RepeatedKFold(random_state=np.random.RandomState(0)), False), + (RepeatedStratifiedKFold(random_state=None), False), + (RepeatedStratifiedKFold(random_state=np.random.RandomState(0)), False), + (ShuffleSplit(random_state=None), False), + (ShuffleSplit(random_state=np.random.RandomState(0)), False), + (GroupShuffleSplit(random_state=None), False), + (GroupShuffleSplit(random_state=np.random.RandomState(0)), False), + (StratifiedShuffleSplit(random_state=None), False), + (StratifiedShuffleSplit(random_state=np.random.RandomState(0)), False), + ], +) +def test_yields_constant_splits(cv, expected): + assert _yields_constant_splits(cv) == expected + + +@pytest.mark.parametrize("cv", ALL_SPLITTERS, ids=[str(cv) for cv in ALL_SPLITTERS]) +def test_splitter_get_metadata_routing(cv): + """Check get_metadata_routing returns the correct MetadataRouter.""" + assert hasattr(cv, "get_metadata_routing") + metadata = cv.get_metadata_routing() + if cv in GROUP_SPLITTERS: + assert metadata.split.requests["groups"] is True + elif cv in NO_GROUP_SPLITTERS: + assert not metadata.split.requests + + assert_request_is_empty(metadata, exclude=["split"]) + + +@pytest.mark.parametrize("cv", ALL_SPLITTERS, ids=[str(cv) for cv in ALL_SPLITTERS]) +def test_splitter_set_split_request(cv): + """Check set_split_request is defined for group splitters and not for others.""" + if cv in GROUP_SPLITTERS: + assert hasattr(cv, "set_split_request") + elif cv in NO_GROUP_SPLITTERS: + assert not hasattr(cv, 
"set_split_request") diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/tests/test_successive_halving.py b/env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/tests/test_successive_halving.py new file mode 100644 index 0000000000000000000000000000000000000000..6c89f89afa68481c761e700bb231a7dafb452c65 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/tests/test_successive_halving.py @@ -0,0 +1,848 @@ +from math import ceil + +import numpy as np +import pytest +from scipy.stats import expon, norm, randint + +from sklearn.datasets import make_classification +from sklearn.dummy import DummyClassifier +from sklearn.experimental import enable_halving_search_cv # noqa +from sklearn.model_selection import ( + GroupKFold, + GroupShuffleSplit, + HalvingGridSearchCV, + HalvingRandomSearchCV, + KFold, + LeaveOneGroupOut, + LeavePGroupsOut, + ShuffleSplit, + StratifiedKFold, + StratifiedShuffleSplit, +) +from sklearn.model_selection._search_successive_halving import ( + _SubsampleMetaSplitter, + _top_k, +) +from sklearn.model_selection.tests.test_search import ( + check_cv_results_array_types, + check_cv_results_keys, +) +from sklearn.svm import SVC, LinearSVC + + +class FastClassifier(DummyClassifier): + """Dummy classifier that accepts parameters a, b, ... z. + + These parameter don't affect the predictions and are useful for fast + grid searching.""" + + # update the constraints such that we accept all parameters from a to z + _parameter_constraints: dict = { + **DummyClassifier._parameter_constraints, + **{ + chr(key): "no_validation" # type: ignore + for key in range(ord("a"), ord("z") + 1) + }, + } + + def __init__( + self, strategy="stratified", random_state=None, constant=None, **kwargs + ): + super().__init__( + strategy=strategy, random_state=random_state, constant=constant + ) + + def get_params(self, deep=False): + params = super().get_params(deep=deep) + for char in range(ord("a"), ord("z") + 1): + params[chr(char)] = "whatever" + return params + + +class SometimesFailClassifier(DummyClassifier): + def __init__( + self, + strategy="stratified", + random_state=None, + constant=None, + n_estimators=10, + fail_fit=False, + fail_predict=False, + a=0, + ): + self.fail_fit = fail_fit + self.fail_predict = fail_predict + self.n_estimators = n_estimators + self.a = a + + super().__init__( + strategy=strategy, random_state=random_state, constant=constant + ) + + def fit(self, X, y): + if self.fail_fit: + raise Exception("fitting failed") + return super().fit(X, y) + + def predict(self, X): + if self.fail_predict: + raise Exception("predict failed") + return super().predict(X) + + +@pytest.mark.filterwarnings("ignore::sklearn.exceptions.FitFailedWarning") +@pytest.mark.filterwarnings("ignore:Scoring failed:UserWarning") +@pytest.mark.filterwarnings("ignore:One or more of the:UserWarning") +@pytest.mark.parametrize("HalvingSearch", (HalvingGridSearchCV, HalvingRandomSearchCV)) +@pytest.mark.parametrize("fail_at", ("fit", "predict")) +def test_nan_handling(HalvingSearch, fail_at): + """Check the selection of the best scores in presence of failure represented by + NaN values.""" + n_samples = 1_000 + X, y = make_classification(n_samples=n_samples, random_state=0) + + search = HalvingSearch( + SometimesFailClassifier(), + {f"fail_{fail_at}": [False, True], "a": range(3)}, + resource="n_estimators", + max_resources=6, + min_resources=1, + factor=2, + ) + + search.fit(X, y) + + # estimators that failed during fit/predict should 
always rank lower + # than ones where the fit/predict succeeded + assert not search.best_params_[f"fail_{fail_at}"] + scores = search.cv_results_["mean_test_score"] + ranks = search.cv_results_["rank_test_score"] + + # some scores should be NaN + assert np.isnan(scores).any() + + unique_nan_ranks = np.unique(ranks[np.isnan(scores)]) + # all NaN scores should have the same rank + assert unique_nan_ranks.shape[0] == 1 + # NaNs should have the lowest rank + assert (unique_nan_ranks[0] >= ranks).all() + + +@pytest.mark.parametrize("Est", (HalvingGridSearchCV, HalvingRandomSearchCV)) +@pytest.mark.parametrize( + ( + "aggressive_elimination," + "max_resources," + "expected_n_iterations," + "expected_n_required_iterations," + "expected_n_possible_iterations," + "expected_n_remaining_candidates," + "expected_n_candidates," + "expected_n_resources," + ), + [ + # notice how it loops at the beginning + # also, the number of candidates evaluated at the last iteration is + # <= factor + (True, "limited", 4, 4, 3, 1, [60, 20, 7, 3], [20, 20, 60, 180]), + # no aggressive elimination: we end up with less iterations, and + # the number of candidates at the last iter is > factor, which isn't + # ideal + (False, "limited", 3, 4, 3, 3, [60, 20, 7], [20, 60, 180]), + # # When the amount of resource isn't limited, aggressive_elimination + # # has no effect. Here the default min_resources='exhaust' will take + # # over. + (True, "unlimited", 4, 4, 4, 1, [60, 20, 7, 3], [37, 111, 333, 999]), + (False, "unlimited", 4, 4, 4, 1, [60, 20, 7, 3], [37, 111, 333, 999]), + ], +) +def test_aggressive_elimination( + Est, + aggressive_elimination, + max_resources, + expected_n_iterations, + expected_n_required_iterations, + expected_n_possible_iterations, + expected_n_remaining_candidates, + expected_n_candidates, + expected_n_resources, +): + # Test the aggressive_elimination parameter. 
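+ # Rough intuition for the expected values above: with 60 candidates and
+ # factor=3, four iterations are needed to end with <= factor candidates
+ # (60 -> 20 -> 7 -> 3), but with max_resources=180 only three resource
+ # levels fit (20, 60, 180). aggressive_elimination compensates by
+ # replaying early iterations at the smallest resource level, hence the
+ # expected n_resources of [20, 20, 60, 180].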
+ + n_samples = 1000 + X, y = make_classification(n_samples=n_samples, random_state=0) + param_grid = {"a": ("l1", "l2"), "b": list(range(30))} + base_estimator = FastClassifier() + + if max_resources == "limited": + max_resources = 180 + else: + max_resources = n_samples + + sh = Est( + base_estimator, + param_grid, + aggressive_elimination=aggressive_elimination, + max_resources=max_resources, + factor=3, + ) + sh.set_params(verbose=True) # just for test coverage + + if Est is HalvingRandomSearchCV: + # same number of candidates as with the grid + sh.set_params(n_candidates=2 * 30, min_resources="exhaust") + + sh.fit(X, y) + + assert sh.n_iterations_ == expected_n_iterations + assert sh.n_required_iterations_ == expected_n_required_iterations + assert sh.n_possible_iterations_ == expected_n_possible_iterations + assert sh.n_resources_ == expected_n_resources + assert sh.n_candidates_ == expected_n_candidates + assert sh.n_remaining_candidates_ == expected_n_remaining_candidates + assert ceil(sh.n_candidates_[-1] / sh.factor) == sh.n_remaining_candidates_ + + +@pytest.mark.parametrize("Est", (HalvingGridSearchCV, HalvingRandomSearchCV)) +@pytest.mark.parametrize( + ( + "min_resources," + "max_resources," + "expected_n_iterations," + "expected_n_possible_iterations," + "expected_n_resources," + ), + [ + # with enough resources + ("smallest", "auto", 2, 4, [20, 60]), + # with enough resources but min_resources set manually + (50, "auto", 2, 3, [50, 150]), + # without enough resources, only one iteration can be done + ("smallest", 30, 1, 1, [20]), + # with exhaust: use as much resources as possible at the last iter + ("exhaust", "auto", 2, 2, [333, 999]), + ("exhaust", 1000, 2, 2, [333, 999]), + ("exhaust", 999, 2, 2, [333, 999]), + ("exhaust", 600, 2, 2, [200, 600]), + ("exhaust", 599, 2, 2, [199, 597]), + ("exhaust", 300, 2, 2, [100, 300]), + ("exhaust", 60, 2, 2, [20, 60]), + ("exhaust", 50, 1, 1, [20]), + ("exhaust", 20, 1, 1, [20]), + ], +) +def test_min_max_resources( + Est, + min_resources, + max_resources, + expected_n_iterations, + expected_n_possible_iterations, + expected_n_resources, +): + # Test the min_resources and max_resources parameters, and how they affect + # the number of resources used at each iteration + n_samples = 1000 + X, y = make_classification(n_samples=n_samples, random_state=0) + param_grid = {"a": [1, 2], "b": [1, 2, 3]} + base_estimator = FastClassifier() + + sh = Est( + base_estimator, + param_grid, + factor=3, + min_resources=min_resources, + max_resources=max_resources, + ) + if Est is HalvingRandomSearchCV: + sh.set_params(n_candidates=6) # same number as with the grid + + sh.fit(X, y) + + expected_n_required_iterations = 2 # given 6 combinations and factor = 3 + assert sh.n_iterations_ == expected_n_iterations + assert sh.n_required_iterations_ == expected_n_required_iterations + assert sh.n_possible_iterations_ == expected_n_possible_iterations + assert sh.n_resources_ == expected_n_resources + if min_resources == "exhaust": + assert sh.n_possible_iterations_ == sh.n_iterations_ == len(sh.n_resources_) + + +@pytest.mark.parametrize("Est", (HalvingRandomSearchCV, HalvingGridSearchCV)) +@pytest.mark.parametrize( + "max_resources, n_iterations, n_possible_iterations", + [ + ("auto", 5, 9), # all resources are used + (1024, 5, 9), + (700, 5, 8), + (512, 5, 8), + (511, 5, 7), + (32, 4, 4), + (31, 3, 3), + (16, 3, 3), + (4, 1, 1), # max_resources == min_resources, only one iteration is + # possible + ], +) +def test_n_iterations(Est, max_resources, 
n_iterations, n_possible_iterations): + # test the number of actual iterations that were run depending on + # max_resources + + n_samples = 1024 + X, y = make_classification(n_samples=n_samples, random_state=1) + param_grid = {"a": [1, 2], "b": list(range(10))} + base_estimator = FastClassifier() + factor = 2 + + sh = Est( + base_estimator, + param_grid, + cv=2, + factor=factor, + max_resources=max_resources, + min_resources=4, + ) + if Est is HalvingRandomSearchCV: + sh.set_params(n_candidates=20) # same as for HalvingGridSearchCV + sh.fit(X, y) + assert sh.n_required_iterations_ == 5 + assert sh.n_iterations_ == n_iterations + assert sh.n_possible_iterations_ == n_possible_iterations + + +@pytest.mark.parametrize("Est", (HalvingRandomSearchCV, HalvingGridSearchCV)) +def test_resource_parameter(Est): + # Test the resource parameter + + n_samples = 1000 + X, y = make_classification(n_samples=n_samples, random_state=0) + param_grid = {"a": [1, 2], "b": list(range(10))} + base_estimator = FastClassifier() + sh = Est(base_estimator, param_grid, cv=2, resource="c", max_resources=10, factor=3) + sh.fit(X, y) + assert set(sh.n_resources_) == set([1, 3, 9]) + for r_i, params, param_c in zip( + sh.cv_results_["n_resources"], + sh.cv_results_["params"], + sh.cv_results_["param_c"], + ): + assert r_i == params["c"] == param_c + + with pytest.raises( + ValueError, match="Cannot use resource=1234 which is not supported " + ): + sh = HalvingGridSearchCV( + base_estimator, param_grid, cv=2, resource="1234", max_resources=10 + ) + sh.fit(X, y) + + with pytest.raises( + ValueError, + match=( + "Cannot use parameter c as the resource since it is part " + "of the searched parameters." + ), + ): + param_grid = {"a": [1, 2], "b": [1, 2], "c": [1, 3]} + sh = HalvingGridSearchCV( + base_estimator, param_grid, cv=2, resource="c", max_resources=10 + ) + sh.fit(X, y) + + +@pytest.mark.parametrize( + "max_resources, n_candidates, expected_n_candidates", + [ + (512, "exhaust", 128), # generate exactly as much as needed + (32, "exhaust", 8), + (32, 8, 8), + (32, 7, 7), # ask for less than what we could + (32, 9, 9), # ask for more than 'reasonable' + ], +) +def test_random_search(max_resources, n_candidates, expected_n_candidates): + # Test random search and make sure the number of generated candidates is + # as expected + + n_samples = 1024 + X, y = make_classification(n_samples=n_samples, random_state=0) + param_grid = {"a": norm, "b": norm} + base_estimator = FastClassifier() + sh = HalvingRandomSearchCV( + base_estimator, + param_grid, + n_candidates=n_candidates, + cv=2, + max_resources=max_resources, + factor=2, + min_resources=4, + ) + sh.fit(X, y) + assert sh.n_candidates_[0] == expected_n_candidates + if n_candidates == "exhaust": + # Make sure 'exhaust' makes the last iteration use as much resources as + # we can + assert sh.n_resources_[-1] == max_resources + + +@pytest.mark.parametrize( + "param_distributions, expected_n_candidates", + [ + ({"a": [1, 2]}, 2), # all lists, sample less than n_candidates + ({"a": randint(1, 3)}, 10), # not all list, respect n_candidates + ], +) +def test_random_search_discrete_distributions( + param_distributions, expected_n_candidates +): + # Make sure random search samples the appropriate number of candidates when + # we ask for more than what's possible. How many parameters are sampled + # depends whether the distributions are 'all lists' or not (see + # ParameterSampler for details). 
This is somewhat redundant with the checks + # in ParameterSampler but interaction bugs were discovered during + # development of SH + + n_samples = 1024 + X, y = make_classification(n_samples=n_samples, random_state=0) + base_estimator = FastClassifier() + sh = HalvingRandomSearchCV(base_estimator, param_distributions, n_candidates=10) + sh.fit(X, y) + assert sh.n_candidates_[0] == expected_n_candidates + + +@pytest.mark.parametrize("Est", (HalvingGridSearchCV, HalvingRandomSearchCV)) +@pytest.mark.parametrize( + "params, expected_error_message", + [ + ( + {"resource": "not_a_parameter"}, + "Cannot use resource=not_a_parameter which is not supported", + ), + ( + {"resource": "a", "max_resources": 100}, + "Cannot use parameter a as the resource since it is part of", + ), + ( + {"max_resources": "auto", "resource": "b"}, + "resource can only be 'n_samples' when max_resources='auto'", + ), + ( + {"min_resources": 15, "max_resources": 14}, + "min_resources_=15 is greater than max_resources_=14", + ), + ({"cv": KFold(shuffle=True)}, "must yield consistent folds"), + ({"cv": ShuffleSplit()}, "must yield consistent folds"), + ], +) +def test_input_errors(Est, params, expected_error_message): + base_estimator = FastClassifier() + param_grid = {"a": [1]} + X, y = make_classification(100) + + sh = Est(base_estimator, param_grid, **params) + + with pytest.raises(ValueError, match=expected_error_message): + sh.fit(X, y) + + +@pytest.mark.parametrize( + "params, expected_error_message", + [ + ( + {"n_candidates": "exhaust", "min_resources": "exhaust"}, + "cannot be both set to 'exhaust'", + ), + ], +) +def test_input_errors_randomized(params, expected_error_message): + # tests specific to HalvingRandomSearchCV + + base_estimator = FastClassifier() + param_grid = {"a": [1]} + X, y = make_classification(100) + + sh = HalvingRandomSearchCV(base_estimator, param_grid, **params) + + with pytest.raises(ValueError, match=expected_error_message): + sh.fit(X, y) + + +@pytest.mark.parametrize( + "fraction, subsample_test, expected_train_size, expected_test_size", + [ + (0.5, True, 40, 10), + (0.5, False, 40, 20), + (0.2, True, 16, 4), + (0.2, False, 16, 20), + ], +) +def test_subsample_splitter_shapes( + fraction, subsample_test, expected_train_size, expected_test_size +): + # Make sure splits returned by SubsampleMetaSplitter are of appropriate + # size + + n_samples = 100 + X, y = make_classification(n_samples) + cv = _SubsampleMetaSplitter( + base_cv=KFold(5), + fraction=fraction, + subsample_test=subsample_test, + random_state=None, + ) + + for train, test in cv.split(X, y): + assert train.shape[0] == expected_train_size + assert test.shape[0] == expected_test_size + if subsample_test: + assert train.shape[0] + test.shape[0] == int(n_samples * fraction) + else: + assert test.shape[0] == n_samples // cv.base_cv.get_n_splits() + + +@pytest.mark.parametrize("subsample_test", (True, False)) +def test_subsample_splitter_determinism(subsample_test): + # Make sure _SubsampleMetaSplitter is consistent across calls to split(): + # - we're OK having training sets differ (they're always sampled with a + # different fraction anyway) + # - when we don't subsample the test set, we want it to be always the same. + # This check is the most important. This is ensured by the determinism + # of the base_cv. 
+ + # Note: we could force both train and test splits to be always the same if + # we drew an int seed in _SubsampleMetaSplitter.__init__ + + n_samples = 100 + X, y = make_classification(n_samples) + cv = _SubsampleMetaSplitter( + base_cv=KFold(5), fraction=0.5, subsample_test=subsample_test, random_state=None + ) + + folds_a = list(cv.split(X, y, groups=None)) + folds_b = list(cv.split(X, y, groups=None)) + + for (train_a, test_a), (train_b, test_b) in zip(folds_a, folds_b): + assert not np.all(train_a == train_b) + + if subsample_test: + assert not np.all(test_a == test_b) + else: + assert np.all(test_a == test_b) + assert np.all(X[test_a] == X[test_b]) + + +@pytest.mark.parametrize( + "k, itr, expected", + [ + (1, 0, ["c"]), + (2, 0, ["a", "c"]), + (4, 0, ["d", "b", "a", "c"]), + (10, 0, ["d", "b", "a", "c"]), + (1, 1, ["e"]), + (2, 1, ["f", "e"]), + (10, 1, ["f", "e"]), + (1, 2, ["i"]), + (10, 2, ["g", "h", "i"]), + ], +) +def test_top_k(k, itr, expected): + results = { # this isn't a 'real world' result dict + "iter": [0, 0, 0, 0, 1, 1, 2, 2, 2], + "mean_test_score": [4, 3, 5, 1, 11, 10, 5, 6, 9], + "params": ["a", "b", "c", "d", "e", "f", "g", "h", "i"], + } + got = _top_k(results, k=k, itr=itr) + assert np.all(got == expected) + + +@pytest.mark.parametrize("Est", (HalvingRandomSearchCV, HalvingGridSearchCV)) +def test_cv_results(Est): + # test that the cv_results_ matches correctly the logic of the + # tournament: in particular that the candidates continued in each + # successive iteration are those that were best in the previous iteration + pd = pytest.importorskip("pandas") + + rng = np.random.RandomState(0) + + n_samples = 1000 + X, y = make_classification(n_samples=n_samples, random_state=0) + param_grid = {"a": ("l1", "l2"), "b": list(range(30))} + base_estimator = FastClassifier() + + # generate random scores: we want to avoid ties, which would otherwise + # mess with the ordering and make testing harder + def scorer(est, X, y): + return rng.rand() + + sh = Est(base_estimator, param_grid, factor=2, scoring=scorer) + if Est is HalvingRandomSearchCV: + # same number of candidates as with the grid + sh.set_params(n_candidates=2 * 30, min_resources="exhaust") + + sh.fit(X, y) + + # non-regression check for + # https://github.com/scikit-learn/scikit-learn/issues/19203 + assert isinstance(sh.cv_results_["iter"], np.ndarray) + assert isinstance(sh.cv_results_["n_resources"], np.ndarray) + + cv_results_df = pd.DataFrame(sh.cv_results_) + + # just make sure we don't have ties + assert len(cv_results_df["mean_test_score"].unique()) == len(cv_results_df) + + cv_results_df["params_str"] = cv_results_df["params"].apply(str) + table = cv_results_df.pivot( + index="params_str", columns="iter", values="mean_test_score" + ) + + # table looks like something like this: + # iter 0 1 2 3 4 5 + # params_str + # {'a': 'l2', 'b': 23} 0.75 NaN NaN NaN NaN NaN + # {'a': 'l1', 'b': 30} 0.90 0.875 NaN NaN NaN NaN + # {'a': 'l1', 'b': 0} 0.75 NaN NaN NaN NaN NaN + # {'a': 'l2', 'b': 3} 0.85 0.925 0.9125 0.90625 NaN NaN + # {'a': 'l1', 'b': 5} 0.80 NaN NaN NaN NaN NaN + # ... + + # where a NaN indicates that the candidate wasn't evaluated at a given + # iteration, because it wasn't part of the top-K at some previous + # iteration. We here make sure that candidates that aren't in the top-k at + # any given iteration are indeed not evaluated at the subsequent + # iterations. 
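+ # nan_mask has one row per candidate and one column per iteration; a True
+ # entry means that candidate was not evaluated at that iteration.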
+ nan_mask = pd.isna(table) + n_iter = sh.n_iterations_ + for it in range(n_iter - 1): + already_discarded_mask = nan_mask[it] + + # make sure that if a candidate is already discarded, we don't evaluate + # it later + assert ( + already_discarded_mask & nan_mask[it + 1] == already_discarded_mask + ).all() + + # make sure that the number of discarded candidate is correct + discarded_now_mask = ~already_discarded_mask & nan_mask[it + 1] + kept_mask = ~already_discarded_mask & ~discarded_now_mask + assert kept_mask.sum() == sh.n_candidates_[it + 1] + + # make sure that all discarded candidates have a lower score than the + # kept candidates + discarded_max_score = table[it].where(discarded_now_mask).max() + kept_min_score = table[it].where(kept_mask).min() + assert discarded_max_score < kept_min_score + + # We now make sure that the best candidate is chosen only from the last + # iteration. + # We also make sure this is true even if there were higher scores in + # earlier rounds (this isn't generally the case, but worth ensuring it's + # possible). + + last_iter = cv_results_df["iter"].max() + idx_best_last_iter = cv_results_df[cv_results_df["iter"] == last_iter][ + "mean_test_score" + ].idxmax() + idx_best_all_iters = cv_results_df["mean_test_score"].idxmax() + + assert sh.best_params_ == cv_results_df.iloc[idx_best_last_iter]["params"] + assert ( + cv_results_df.iloc[idx_best_last_iter]["mean_test_score"] + < cv_results_df.iloc[idx_best_all_iters]["mean_test_score"] + ) + assert ( + cv_results_df.iloc[idx_best_last_iter]["params"] + != cv_results_df.iloc[idx_best_all_iters]["params"] + ) + + +@pytest.mark.parametrize("Est", (HalvingGridSearchCV, HalvingRandomSearchCV)) +def test_base_estimator_inputs(Est): + # make sure that the base estimators are passed the correct parameters and + # number of samples at each iteration. + pd = pytest.importorskip("pandas") + + passed_n_samples_fit = [] + passed_n_samples_predict = [] + passed_params = [] + + class FastClassifierBookKeeping(FastClassifier): + def fit(self, X, y): + passed_n_samples_fit.append(X.shape[0]) + return super().fit(X, y) + + def predict(self, X): + passed_n_samples_predict.append(X.shape[0]) + return super().predict(X) + + def set_params(self, **params): + passed_params.append(params) + return super().set_params(**params) + + n_samples = 1024 + n_splits = 2 + X, y = make_classification(n_samples=n_samples, random_state=0) + param_grid = {"a": ("l1", "l2"), "b": list(range(30))} + base_estimator = FastClassifierBookKeeping() + + sh = Est( + base_estimator, + param_grid, + factor=2, + cv=n_splits, + return_train_score=False, + refit=False, + ) + if Est is HalvingRandomSearchCV: + # same number of candidates as with the grid + sh.set_params(n_candidates=2 * 30, min_resources="exhaust") + + sh.fit(X, y) + + assert len(passed_n_samples_fit) == len(passed_n_samples_predict) + passed_n_samples = [ + x + y for (x, y) in zip(passed_n_samples_fit, passed_n_samples_predict) + ] + + # Lists are of length n_splits * n_iter * n_candidates_at_i. + # Each chunk of size n_splits corresponds to the n_splits folds for the + # same candidate at the same iteration, so they contain equal values. 
We + # subsample such that the lists are of length n_iter * n_candidates_at_it + passed_n_samples = passed_n_samples[::n_splits] + passed_params = passed_params[::n_splits] + + cv_results_df = pd.DataFrame(sh.cv_results_) + + assert len(passed_params) == len(passed_n_samples) == len(cv_results_df) + + uniques, counts = np.unique(passed_n_samples, return_counts=True) + assert (sh.n_resources_ == uniques).all() + assert (sh.n_candidates_ == counts).all() + + assert (cv_results_df["params"] == passed_params).all() + assert (cv_results_df["n_resources"] == passed_n_samples).all() + + +@pytest.mark.parametrize("Est", (HalvingGridSearchCV, HalvingRandomSearchCV)) +def test_groups_support(Est): + # Check if ValueError (when groups is None) propagates to + # HalvingGridSearchCV and HalvingRandomSearchCV + # And also check if groups is correctly passed to the cv object + rng = np.random.RandomState(0) + + X, y = make_classification(n_samples=50, n_classes=2, random_state=0) + groups = rng.randint(0, 3, 50) + + clf = LinearSVC(dual="auto", random_state=0) + grid = {"C": [1]} + + group_cvs = [ + LeaveOneGroupOut(), + LeavePGroupsOut(2), + GroupKFold(n_splits=3), + GroupShuffleSplit(random_state=0), + ] + error_msg = "The 'groups' parameter should not be None." + for cv in group_cvs: + gs = Est(clf, grid, cv=cv, random_state=0) + with pytest.raises(ValueError, match=error_msg): + gs.fit(X, y) + gs.fit(X, y, groups=groups) + + non_group_cvs = [StratifiedKFold(), StratifiedShuffleSplit(random_state=0)] + for cv in non_group_cvs: + gs = Est(clf, grid, cv=cv) + # Should not raise an error + gs.fit(X, y) + + +@pytest.mark.parametrize("SearchCV", [HalvingRandomSearchCV, HalvingGridSearchCV]) +def test_min_resources_null(SearchCV): + """Check that we raise an error if the minimum resources is set to 0.""" + base_estimator = FastClassifier() + param_grid = {"a": [1]} + X = np.empty(0).reshape(0, 3) + + search = SearchCV(base_estimator, param_grid, min_resources="smallest") + + err_msg = "min_resources_=0: you might have passed an empty dataset X." + with pytest.raises(ValueError, match=err_msg): + search.fit(X, []) + + +@pytest.mark.parametrize("SearchCV", [HalvingGridSearchCV, HalvingRandomSearchCV]) +def test_select_best_index(SearchCV): + """Check the selection strategy of the halving search.""" + results = { # this isn't a 'real world' result dict + "iter": np.array([0, 0, 0, 0, 1, 1, 2, 2, 2]), + "mean_test_score": np.array([4, 3, 5, 1, 11, 10, 5, 6, 9]), + "params": np.array(["a", "b", "c", "d", "e", "f", "g", "h", "i"]), + } + + # we expect the index of 'i' + best_index = SearchCV._select_best_index(None, None, results) + assert best_index == 8 + + +def test_halving_random_search_list_of_dicts(): + """Check the behaviour of the `HalvingRandomSearchCV` with `param_distribution` + being a list of dictionary. 
+ """ + X, y = make_classification(n_samples=150, n_features=4, random_state=42) + + params = [ + {"kernel": ["rbf"], "C": expon(scale=10), "gamma": expon(scale=0.1)}, + {"kernel": ["poly"], "degree": [2, 3]}, + ] + param_keys = ( + "param_C", + "param_degree", + "param_gamma", + "param_kernel", + ) + score_keys = ( + "mean_test_score", + "mean_train_score", + "rank_test_score", + "split0_test_score", + "split1_test_score", + "split2_test_score", + "split0_train_score", + "split1_train_score", + "split2_train_score", + "std_test_score", + "std_train_score", + "mean_fit_time", + "std_fit_time", + "mean_score_time", + "std_score_time", + ) + extra_keys = ("n_resources", "iter") + + search = HalvingRandomSearchCV( + SVC(), cv=3, param_distributions=params, return_train_score=True, random_state=0 + ) + search.fit(X, y) + n_candidates = sum(search.n_candidates_) + cv_results = search.cv_results_ + # Check results structure + check_cv_results_keys(cv_results, param_keys, score_keys, n_candidates, extra_keys) + check_cv_results_array_types(search, param_keys, score_keys) + + assert all( + ( + cv_results["param_C"].mask[i] + and cv_results["param_gamma"].mask[i] + and not cv_results["param_degree"].mask[i] + ) + for i in range(n_candidates) + if cv_results["param_kernel"][i] == "poly" + ) + assert all( + ( + not cv_results["param_C"].mask[i] + and not cv_results["param_gamma"].mask[i] + and cv_results["param_degree"].mask[i] + ) + for i in range(n_candidates) + if cv_results["param_kernel"][i] == "rbf" + ) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/tests/test_validation.py b/env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/tests/test_validation.py new file mode 100644 index 0000000000000000000000000000000000000000..e1ecfd14f45a3a75f24a9d0258bcf768a9365704 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/model_selection/tests/test_validation.py @@ -0,0 +1,2621 @@ +"""Test the validation module""" +import os +import re +import sys +import tempfile +import warnings +from functools import partial +from io import StringIO +from time import sleep + +import numpy as np +import pytest +from scipy.sparse import issparse + +from sklearn.base import BaseEstimator, clone +from sklearn.cluster import KMeans +from sklearn.datasets import ( + load_diabetes, + load_digits, + load_iris, + make_classification, + make_multilabel_classification, + make_regression, +) +from sklearn.ensemble import RandomForestClassifier +from sklearn.exceptions import FitFailedWarning +from sklearn.impute import SimpleImputer +from sklearn.linear_model import ( + LogisticRegression, + PassiveAggressiveClassifier, + Ridge, + RidgeClassifier, + SGDClassifier, +) +from sklearn.metrics import ( + accuracy_score, + check_scoring, + confusion_matrix, + explained_variance_score, + make_scorer, + mean_squared_error, + precision_recall_fscore_support, + precision_score, + r2_score, +) +from sklearn.model_selection import ( + GridSearchCV, + GroupKFold, + GroupShuffleSplit, + KFold, + LeaveOneGroupOut, + LeaveOneOut, + LeavePGroupsOut, + ShuffleSplit, + StratifiedKFold, + cross_val_predict, + cross_val_score, + cross_validate, + learning_curve, + permutation_test_score, + validation_curve, +) +from sklearn.model_selection._validation import ( + _check_is_permutation, + _fit_and_score, + _score, +) +from sklearn.model_selection.tests.common import OneTimeSplitter +from sklearn.model_selection.tests.test_search import FailingClassifier +from sklearn.multiclass import 
OneVsRestClassifier +from sklearn.neighbors import KNeighborsClassifier +from sklearn.neural_network import MLPRegressor +from sklearn.pipeline import Pipeline +from sklearn.preprocessing import LabelEncoder, scale +from sklearn.svm import SVC, LinearSVC +from sklearn.tests.metadata_routing_common import ( + ConsumingClassifier, + ConsumingScorer, + ConsumingSplitter, + _Registry, + check_recorded_metadata, +) +from sklearn.utils import shuffle +from sklearn.utils._mocking import CheckingClassifier, MockDataFrame +from sklearn.utils._testing import ( + assert_allclose, + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, +) +from sklearn.utils.fixes import COO_CONTAINERS, CSR_CONTAINERS +from sklearn.utils.validation import _num_samples + + +class MockImprovingEstimator(BaseEstimator): + """Dummy classifier to test the learning curve""" + + def __init__(self, n_max_train_sizes): + self.n_max_train_sizes = n_max_train_sizes + self.train_sizes = 0 + self.X_subset = None + + def fit(self, X_subset, y_subset=None): + self.X_subset = X_subset + self.train_sizes = X_subset.shape[0] + return self + + def predict(self, X): + raise NotImplementedError + + def score(self, X=None, Y=None): + # training score becomes worse (2 -> 1), test error better (0 -> 1) + if self._is_training_data(X): + return 2.0 - float(self.train_sizes) / self.n_max_train_sizes + else: + return float(self.train_sizes) / self.n_max_train_sizes + + def _is_training_data(self, X): + return X is self.X_subset + + +class MockIncrementalImprovingEstimator(MockImprovingEstimator): + """Dummy classifier that provides partial_fit""" + + def __init__(self, n_max_train_sizes, expected_fit_params=None): + super().__init__(n_max_train_sizes) + self.x = None + self.expected_fit_params = expected_fit_params + + def _is_training_data(self, X): + return self.x in X + + def partial_fit(self, X, y=None, **params): + self.train_sizes += X.shape[0] + self.x = X[0] + if self.expected_fit_params: + missing = set(self.expected_fit_params) - set(params) + if missing: + raise AssertionError( + f"Expected fit parameter(s) {list(missing)} not seen." + ) + for key, value in params.items(): + if key in self.expected_fit_params and _num_samples( + value + ) != _num_samples(X): + raise AssertionError( + f"Fit parameter {key} has length {_num_samples(value)}" + f"; expected {_num_samples(X)}." 
+ ) + + +class MockEstimatorWithParameter(BaseEstimator): + """Dummy classifier to test the validation curve""" + + def __init__(self, param=0.5): + self.X_subset = None + self.param = param + + def fit(self, X_subset, y_subset): + self.X_subset = X_subset + self.train_sizes = X_subset.shape[0] + return self + + def predict(self, X): + raise NotImplementedError + + def score(self, X=None, y=None): + return self.param if self._is_training_data(X) else 1 - self.param + + def _is_training_data(self, X): + return X is self.X_subset + + +class MockEstimatorWithSingleFitCallAllowed(MockEstimatorWithParameter): + """Dummy classifier that disallows repeated calls of fit method""" + + def fit(self, X_subset, y_subset): + assert not hasattr(self, "fit_called_"), "fit is called the second time" + self.fit_called_ = True + return super().fit(X_subset, y_subset) + + def predict(self, X): + raise NotImplementedError + + +class MockClassifier: + """Dummy classifier to test the cross-validation""" + + def __init__(self, a=0, allow_nd=False): + self.a = a + self.allow_nd = allow_nd + + def fit( + self, + X, + Y=None, + sample_weight=None, + class_prior=None, + sparse_sample_weight=None, + sparse_param=None, + dummy_int=None, + dummy_str=None, + dummy_obj=None, + callback=None, + ): + """The dummy arguments are to test that this fit function can + accept non-array arguments through cross-validation, such as: + - int + - str (this is actually array-like) + - object + - function + """ + self.dummy_int = dummy_int + self.dummy_str = dummy_str + self.dummy_obj = dummy_obj + if callback is not None: + callback(self) + + if self.allow_nd: + X = X.reshape(len(X), -1) + if X.ndim >= 3 and not self.allow_nd: + raise ValueError("X cannot be d") + if sample_weight is not None: + assert sample_weight.shape[0] == X.shape[0], ( + "MockClassifier extra fit_param " + "sample_weight.shape[0] is {0}, should be {1}".format( + sample_weight.shape[0], X.shape[0] + ) + ) + if class_prior is not None: + assert class_prior.shape[0] == len(np.unique(y)), ( + "MockClassifier extra fit_param class_prior.shape[0]" + " is {0}, should be {1}".format(class_prior.shape[0], len(np.unique(y))) + ) + if sparse_sample_weight is not None: + fmt = ( + "MockClassifier extra fit_param sparse_sample_weight" + ".shape[0] is {0}, should be {1}" + ) + assert sparse_sample_weight.shape[0] == X.shape[0], fmt.format( + sparse_sample_weight.shape[0], X.shape[0] + ) + if sparse_param is not None: + fmt = ( + "MockClassifier extra fit_param sparse_param.shape " + "is ({0}, {1}), should be ({2}, {3})" + ) + assert sparse_param.shape == P.shape, fmt.format( + sparse_param.shape[0], + sparse_param.shape[1], + P.shape[0], + P.shape[1], + ) + return self + + def predict(self, T): + if self.allow_nd: + T = T.reshape(len(T), -1) + return T[:, 0] + + def predict_proba(self, T): + return T + + def score(self, X=None, Y=None): + return 1.0 / (1 + np.abs(self.a)) + + def get_params(self, deep=False): + return {"a": self.a, "allow_nd": self.allow_nd} + + +# XXX: use 2D array, since 1D X is being detected as a single sample in +# check_consistent_length +X = np.ones((10, 2)) +y = np.array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4]) +# The number of samples per class needs to be > n_splits, +# for StratifiedKFold(n_splits=3) +y2 = np.array([1, 1, 1, 2, 2, 2, 3, 3, 3, 3]) +P = np.eye(5) + + +@pytest.mark.parametrize("coo_container", COO_CONTAINERS) +def test_cross_val_score(coo_container): + clf = MockClassifier() + X_sparse = coo_container(X) + + for a in range(-10, 10): + clf.a = 
a + # Smoke test + scores = cross_val_score(clf, X, y2) + assert_array_equal(scores, clf.score(X, y2)) + + # test with multioutput y + multioutput_y = np.column_stack([y2, y2[::-1]]) + scores = cross_val_score(clf, X_sparse, multioutput_y) + assert_array_equal(scores, clf.score(X_sparse, multioutput_y)) + + scores = cross_val_score(clf, X_sparse, y2) + assert_array_equal(scores, clf.score(X_sparse, y2)) + + # test with multioutput y + scores = cross_val_score(clf, X_sparse, multioutput_y) + assert_array_equal(scores, clf.score(X_sparse, multioutput_y)) + + # test with X and y as list + list_check = lambda x: isinstance(x, list) + clf = CheckingClassifier(check_X=list_check) + scores = cross_val_score(clf, X.tolist(), y2.tolist(), cv=3) + + clf = CheckingClassifier(check_y=list_check) + scores = cross_val_score(clf, X, y2.tolist(), cv=3) + + # test with 3d X and + X_3d = X[:, :, np.newaxis] + clf = MockClassifier(allow_nd=True) + scores = cross_val_score(clf, X_3d, y2) + + clf = MockClassifier(allow_nd=False) + with pytest.raises(ValueError): + cross_val_score(clf, X_3d, y2, error_score="raise") + + +def test_cross_validate_many_jobs(): + # regression test for #12154: cv='warn' with n_jobs>1 trigger a copy of + # the parameters leading to a failure in check_cv due to cv is 'warn' + # instead of cv == 'warn'. + X, y = load_iris(return_X_y=True) + clf = SVC(gamma="auto") + grid = GridSearchCV(clf, param_grid={"C": [1, 10]}) + cross_validate(grid, X, y, n_jobs=2) + + +def test_cross_validate_invalid_scoring_param(): + X, y = make_classification(random_state=0) + estimator = MockClassifier() + + # Test the errors + error_message_regexp = ".*must be unique strings.*" + + # List/tuple of callables should raise a message advising users to use + # dict of names to callables mapping + with pytest.raises(ValueError, match=error_message_regexp): + cross_validate( + estimator, + X, + y, + scoring=(make_scorer(precision_score), make_scorer(accuracy_score)), + ) + with pytest.raises(ValueError, match=error_message_regexp): + cross_validate(estimator, X, y, scoring=(make_scorer(precision_score),)) + + # So should empty lists/tuples + with pytest.raises(ValueError, match=error_message_regexp + "Empty list.*"): + cross_validate(estimator, X, y, scoring=()) + + # So should duplicated entries + with pytest.raises(ValueError, match=error_message_regexp + "Duplicate.*"): + cross_validate(estimator, X, y, scoring=("f1_micro", "f1_micro")) + + # Nested Lists should raise a generic error message + with pytest.raises(ValueError, match=error_message_regexp): + cross_validate(estimator, X, y, scoring=[[make_scorer(precision_score)]]) + + # Empty dict should raise invalid scoring error + with pytest.raises(ValueError, match="An empty dict"): + cross_validate(estimator, X, y, scoring=(dict())) + + multiclass_scorer = make_scorer(precision_recall_fscore_support) + + # Multiclass Scorers that return multiple values are not supported yet + # the warning message we're expecting to see + warning_message = ( + "Scoring failed. The score on this train-test " + f"partition for these parameters will be set to {np.nan}. 
" + "Details: \n" + ) + + with pytest.warns(UserWarning, match=warning_message): + cross_validate(estimator, X, y, scoring=multiclass_scorer) + + with pytest.warns(UserWarning, match=warning_message): + cross_validate(estimator, X, y, scoring={"foo": multiclass_scorer}) + + +def test_cross_validate_nested_estimator(): + # Non-regression test to ensure that nested + # estimators are properly returned in a list + # https://github.com/scikit-learn/scikit-learn/pull/17745 + (X, y) = load_iris(return_X_y=True) + pipeline = Pipeline( + [ + ("imputer", SimpleImputer()), + ("classifier", MockClassifier()), + ] + ) + + results = cross_validate(pipeline, X, y, return_estimator=True) + estimators = results["estimator"] + + assert isinstance(estimators, list) + assert all(isinstance(estimator, Pipeline) for estimator in estimators) + + +@pytest.mark.parametrize("use_sparse", [False, True]) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_cross_validate(use_sparse: bool, csr_container): + # Compute train and test mse/r2 scores + cv = KFold() + + # Regression + X_reg, y_reg = make_regression(n_samples=30, random_state=0) + reg = Ridge(random_state=0) + + # Classification + X_clf, y_clf = make_classification(n_samples=30, random_state=0) + clf = SVC(kernel="linear", random_state=0) + + if use_sparse: + X_reg = csr_container(X_reg) + X_clf = csr_container(X_clf) + + for X, y, est in ((X_reg, y_reg, reg), (X_clf, y_clf, clf)): + # It's okay to evaluate regression metrics on classification too + mse_scorer = check_scoring(est, scoring="neg_mean_squared_error") + r2_scorer = check_scoring(est, scoring="r2") + train_mse_scores = [] + test_mse_scores = [] + train_r2_scores = [] + test_r2_scores = [] + fitted_estimators = [] + + for train, test in cv.split(X, y): + est = clone(est).fit(X[train], y[train]) + train_mse_scores.append(mse_scorer(est, X[train], y[train])) + train_r2_scores.append(r2_scorer(est, X[train], y[train])) + test_mse_scores.append(mse_scorer(est, X[test], y[test])) + test_r2_scores.append(r2_scorer(est, X[test], y[test])) + fitted_estimators.append(est) + + train_mse_scores = np.array(train_mse_scores) + test_mse_scores = np.array(test_mse_scores) + train_r2_scores = np.array(train_r2_scores) + test_r2_scores = np.array(test_r2_scores) + fitted_estimators = np.array(fitted_estimators) + + scores = ( + train_mse_scores, + test_mse_scores, + train_r2_scores, + test_r2_scores, + fitted_estimators, + ) + + # To ensure that the test does not suffer from + # large statistical fluctuations due to slicing small datasets, + # we pass the cross-validation instance + check_cross_validate_single_metric(est, X, y, scores, cv) + check_cross_validate_multi_metric(est, X, y, scores, cv) + + +def check_cross_validate_single_metric(clf, X, y, scores, cv): + ( + train_mse_scores, + test_mse_scores, + train_r2_scores, + test_r2_scores, + fitted_estimators, + ) = scores + # Test single metric evaluation when scoring is string or singleton list + for return_train_score, dict_len in ((True, 4), (False, 3)): + # Single metric passed as a string + if return_train_score: + mse_scores_dict = cross_validate( + clf, + X, + y, + scoring="neg_mean_squared_error", + return_train_score=True, + cv=cv, + ) + assert_array_almost_equal(mse_scores_dict["train_score"], train_mse_scores) + else: + mse_scores_dict = cross_validate( + clf, + X, + y, + scoring="neg_mean_squared_error", + return_train_score=False, + cv=cv, + ) + assert isinstance(mse_scores_dict, dict) + assert len(mse_scores_dict) == dict_len + 
assert_array_almost_equal(mse_scores_dict["test_score"], test_mse_scores) + + # Single metric passed as a list + if return_train_score: + # It must be True by default - deprecated + r2_scores_dict = cross_validate( + clf, X, y, scoring=["r2"], return_train_score=True, cv=cv + ) + assert_array_almost_equal(r2_scores_dict["train_r2"], train_r2_scores, True) + else: + r2_scores_dict = cross_validate( + clf, X, y, scoring=["r2"], return_train_score=False, cv=cv + ) + assert isinstance(r2_scores_dict, dict) + assert len(r2_scores_dict) == dict_len + assert_array_almost_equal(r2_scores_dict["test_r2"], test_r2_scores) + + # Test return_estimator option + mse_scores_dict = cross_validate( + clf, X, y, scoring="neg_mean_squared_error", return_estimator=True, cv=cv + ) + for k, est in enumerate(mse_scores_dict["estimator"]): + est_coef = est.coef_.copy() + if issparse(est_coef): + est_coef = est_coef.toarray() + + fitted_est_coef = fitted_estimators[k].coef_.copy() + if issparse(fitted_est_coef): + fitted_est_coef = fitted_est_coef.toarray() + + assert_almost_equal(est_coef, fitted_est_coef) + assert_almost_equal(est.intercept_, fitted_estimators[k].intercept_) + + +def check_cross_validate_multi_metric(clf, X, y, scores, cv): + # Test multimetric evaluation when scoring is a list / dict + ( + train_mse_scores, + test_mse_scores, + train_r2_scores, + test_r2_scores, + fitted_estimators, + ) = scores + + def custom_scorer(clf, X, y): + y_pred = clf.predict(X) + return { + "r2": r2_score(y, y_pred), + "neg_mean_squared_error": -mean_squared_error(y, y_pred), + } + + all_scoring = ( + ("r2", "neg_mean_squared_error"), + { + "r2": make_scorer(r2_score), + "neg_mean_squared_error": "neg_mean_squared_error", + }, + custom_scorer, + ) + + keys_sans_train = { + "test_r2", + "test_neg_mean_squared_error", + "fit_time", + "score_time", + } + keys_with_train = keys_sans_train.union( + {"train_r2", "train_neg_mean_squared_error"} + ) + + for return_train_score in (True, False): + for scoring in all_scoring: + if return_train_score: + # return_train_score must be True by default - deprecated + cv_results = cross_validate( + clf, X, y, scoring=scoring, return_train_score=True, cv=cv + ) + assert_array_almost_equal(cv_results["train_r2"], train_r2_scores) + assert_array_almost_equal( + cv_results["train_neg_mean_squared_error"], train_mse_scores + ) + else: + cv_results = cross_validate( + clf, X, y, scoring=scoring, return_train_score=False, cv=cv + ) + assert isinstance(cv_results, dict) + assert set(cv_results.keys()) == ( + keys_with_train if return_train_score else keys_sans_train + ) + assert_array_almost_equal(cv_results["test_r2"], test_r2_scores) + assert_array_almost_equal( + cv_results["test_neg_mean_squared_error"], test_mse_scores + ) + + # Make sure all the arrays are of np.ndarray type + assert type(cv_results["test_r2"]) == np.ndarray + assert type(cv_results["test_neg_mean_squared_error"]) == np.ndarray + assert type(cv_results["fit_time"]) == np.ndarray + assert type(cv_results["score_time"]) == np.ndarray + + # Ensure all the times are within sane limits + assert np.all(cv_results["fit_time"] >= 0) + assert np.all(cv_results["fit_time"] < 10) + assert np.all(cv_results["score_time"] >= 0) + assert np.all(cv_results["score_time"] < 10) + + +def test_cross_val_score_predict_groups(): + # Check if ValueError (when groups is None) propagates to cross_val_score + # and cross_val_predict + # And also check if groups is correctly passed to the cv object + X, y = make_classification(n_samples=20, 
n_classes=2, random_state=0) + + clf = SVC(kernel="linear") + + group_cvs = [ + LeaveOneGroupOut(), + LeavePGroupsOut(2), + GroupKFold(), + GroupShuffleSplit(), + ] + error_message = "The 'groups' parameter should not be None." + for cv in group_cvs: + with pytest.raises(ValueError, match=error_message): + cross_val_score(estimator=clf, X=X, y=y, cv=cv) + with pytest.raises(ValueError, match=error_message): + cross_val_predict(estimator=clf, X=X, y=y, cv=cv) + + +@pytest.mark.filterwarnings("ignore: Using or importing the ABCs from") +def test_cross_val_score_pandas(): + # check cross_val_score doesn't destroy pandas dataframe + types = [(MockDataFrame, MockDataFrame)] + try: + from pandas import DataFrame, Series + + types.append((Series, DataFrame)) + except ImportError: + pass + for TargetType, InputFeatureType in types: + # X dataframe, y series + # 3 fold cross val is used so we need at least 3 samples per class + X_df, y_ser = InputFeatureType(X), TargetType(y2) + check_df = lambda x: isinstance(x, InputFeatureType) + check_series = lambda x: isinstance(x, TargetType) + clf = CheckingClassifier(check_X=check_df, check_y=check_series) + cross_val_score(clf, X_df, y_ser, cv=3) + + +def test_cross_val_score_mask(): + # test that cross_val_score works with boolean masks + svm = SVC(kernel="linear") + iris = load_iris() + X, y = iris.data, iris.target + kfold = KFold(5) + scores_indices = cross_val_score(svm, X, y, cv=kfold) + kfold = KFold(5) + cv_masks = [] + for train, test in kfold.split(X, y): + mask_train = np.zeros(len(y), dtype=bool) + mask_test = np.zeros(len(y), dtype=bool) + mask_train[train] = 1 + mask_test[test] = 1 + cv_masks.append((train, test)) + scores_masks = cross_val_score(svm, X, y, cv=cv_masks) + assert_array_equal(scores_indices, scores_masks) + + +def test_cross_val_score_precomputed(): + # test for svm with precomputed kernel + svm = SVC(kernel="precomputed") + iris = load_iris() + X, y = iris.data, iris.target + linear_kernel = np.dot(X, X.T) + score_precomputed = cross_val_score(svm, linear_kernel, y) + svm = SVC(kernel="linear") + score_linear = cross_val_score(svm, X, y) + assert_array_almost_equal(score_precomputed, score_linear) + + # test with callable + svm = SVC(kernel=lambda x, y: np.dot(x, y.T)) + score_callable = cross_val_score(svm, X, y) + assert_array_almost_equal(score_precomputed, score_callable) + + # Error raised for non-square X + svm = SVC(kernel="precomputed") + with pytest.raises(ValueError): + cross_val_score(svm, X, y) + + # test error is raised when the precomputed kernel is not array-like + # or sparse + with pytest.raises(ValueError): + cross_val_score(svm, linear_kernel.tolist(), y) + + +@pytest.mark.parametrize("coo_container", COO_CONTAINERS) +def test_cross_val_score_fit_params(coo_container): + clf = MockClassifier() + n_samples = X.shape[0] + n_classes = len(np.unique(y)) + + W_sparse = coo_container( + (np.array([1]), (np.array([1]), np.array([0]))), shape=(10, 1) + ) + P_sparse = coo_container(np.eye(5)) + + DUMMY_INT = 42 + DUMMY_STR = "42" + DUMMY_OBJ = object() + + def assert_fit_params(clf): + # Function to test that the values are passed correctly to the + # classifier arguments for non-array type + + assert clf.dummy_int == DUMMY_INT + assert clf.dummy_str == DUMMY_STR + assert clf.dummy_obj == DUMMY_OBJ + + fit_params = { + "sample_weight": np.ones(n_samples), + "class_prior": np.full(n_classes, 1.0 / n_classes), + "sparse_sample_weight": W_sparse, + "sparse_param": P_sparse, + "dummy_int": DUMMY_INT, + "dummy_str": 
DUMMY_STR, + "dummy_obj": DUMMY_OBJ, + "callback": assert_fit_params, + } + cross_val_score(clf, X, y, params=fit_params) + + +def test_cross_val_score_score_func(): + clf = MockClassifier() + _score_func_args = [] + + def score_func(y_test, y_predict): + _score_func_args.append((y_test, y_predict)) + return 1.0 + + with warnings.catch_warnings(record=True): + scoring = make_scorer(score_func) + score = cross_val_score(clf, X, y, scoring=scoring, cv=3) + assert_array_equal(score, [1.0, 1.0, 1.0]) + # Test that score function is called only 3 times (for cv=3) + assert len(_score_func_args) == 3 + + +def test_cross_val_score_with_score_func_classification(): + iris = load_iris() + clf = SVC(kernel="linear") + + # Default score (should be the accuracy score) + scores = cross_val_score(clf, iris.data, iris.target) + assert_array_almost_equal(scores, [0.97, 1.0, 0.97, 0.97, 1.0], 2) + + # Correct classification score (aka. zero / one score) - should be the + # same as the default estimator score + zo_scores = cross_val_score(clf, iris.data, iris.target, scoring="accuracy") + assert_array_almost_equal(zo_scores, [0.97, 1.0, 0.97, 0.97, 1.0], 2) + + # F1 score (class are balanced so f1_score should be equal to zero/one + # score + f1_scores = cross_val_score(clf, iris.data, iris.target, scoring="f1_weighted") + assert_array_almost_equal(f1_scores, [0.97, 1.0, 0.97, 0.97, 1.0], 2) + + +def test_cross_val_score_with_score_func_regression(): + X, y = make_regression(n_samples=30, n_features=20, n_informative=5, random_state=0) + reg = Ridge() + + # Default score of the Ridge regression estimator + scores = cross_val_score(reg, X, y) + assert_array_almost_equal(scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2) + + # R2 score (aka. determination coefficient) - should be the + # same as the default estimator score + r2_scores = cross_val_score(reg, X, y, scoring="r2") + assert_array_almost_equal(r2_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2) + + # Mean squared error; this is a loss function, so "scores" are negative + neg_mse_scores = cross_val_score(reg, X, y, scoring="neg_mean_squared_error") + expected_neg_mse = np.array([-763.07, -553.16, -274.38, -273.26, -1681.99]) + assert_array_almost_equal(neg_mse_scores, expected_neg_mse, 2) + + # Explained variance + scoring = make_scorer(explained_variance_score) + ev_scores = cross_val_score(reg, X, y, scoring=scoring) + assert_array_almost_equal(ev_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2) + + +@pytest.mark.parametrize("coo_container", COO_CONTAINERS) +def test_permutation_score(coo_container): + iris = load_iris() + X = iris.data + X_sparse = coo_container(X) + y = iris.target + svm = SVC(kernel="linear") + cv = StratifiedKFold(2) + + score, scores, pvalue = permutation_test_score( + svm, X, y, n_permutations=30, cv=cv, scoring="accuracy" + ) + assert score > 0.9 + assert_almost_equal(pvalue, 0.0, 1) + + score_group, _, pvalue_group = permutation_test_score( + svm, + X, + y, + n_permutations=30, + cv=cv, + scoring="accuracy", + groups=np.ones(y.size), + random_state=0, + ) + assert score_group == score + assert pvalue_group == pvalue + + # check that we obtain the same results with a sparse representation + svm_sparse = SVC(kernel="linear") + cv_sparse = StratifiedKFold(2) + score_group, _, pvalue_group = permutation_test_score( + svm_sparse, + X_sparse, + y, + n_permutations=30, + cv=cv_sparse, + scoring="accuracy", + groups=np.ones(y.size), + random_state=0, + ) + + assert score_group == score + assert pvalue_group == pvalue + + # test with custom scoring 
object + def custom_score(y_true, y_pred): + return ((y_true == y_pred).sum() - (y_true != y_pred).sum()) / y_true.shape[0] + + scorer = make_scorer(custom_score) + score, _, pvalue = permutation_test_score( + svm, X, y, n_permutations=100, scoring=scorer, cv=cv, random_state=0 + ) + assert_almost_equal(score, 0.93, 2) + assert_almost_equal(pvalue, 0.01, 3) + + # set random y + y = np.mod(np.arange(len(y)), 3) + + score, scores, pvalue = permutation_test_score( + svm, X, y, n_permutations=30, cv=cv, scoring="accuracy" + ) + + assert score < 0.5 + assert pvalue > 0.2 + + +def test_permutation_test_score_allow_nans(): + # Check that permutation_test_score allows input data with NaNs + X = np.arange(200, dtype=np.float64).reshape(10, -1) + X[2, :] = np.nan + y = np.repeat([0, 1], X.shape[0] / 2) + p = Pipeline( + [ + ("imputer", SimpleImputer(strategy="mean", missing_values=np.nan)), + ("classifier", MockClassifier()), + ] + ) + permutation_test_score(p, X, y) + + +def test_permutation_test_score_fit_params(): + X = np.arange(100).reshape(10, 10) + y = np.array([0] * 5 + [1] * 5) + clf = CheckingClassifier(expected_sample_weight=True) + + err_msg = r"Expected sample_weight to be passed" + with pytest.raises(AssertionError, match=err_msg): + permutation_test_score(clf, X, y) + + err_msg = r"sample_weight.shape == \(1,\), expected \(8,\)!" + with pytest.raises(ValueError, match=err_msg): + permutation_test_score(clf, X, y, fit_params={"sample_weight": np.ones(1)}) + permutation_test_score(clf, X, y, fit_params={"sample_weight": np.ones(10)}) + + +def test_cross_val_score_allow_nans(): + # Check that cross_val_score allows input data with NaNs + X = np.arange(200, dtype=np.float64).reshape(10, -1) + X[2, :] = np.nan + y = np.repeat([0, 1], X.shape[0] / 2) + p = Pipeline( + [ + ("imputer", SimpleImputer(strategy="mean", missing_values=np.nan)), + ("classifier", MockClassifier()), + ] + ) + cross_val_score(p, X, y) + + +def test_cross_val_score_multilabel(): + X = np.array( + [ + [-3, 4], + [2, 4], + [3, 3], + [0, 2], + [-3, 1], + [-2, 1], + [0, 0], + [-2, -1], + [-1, -2], + [1, -2], + ] + ) + y = np.array( + [[1, 1], [0, 1], [0, 1], [0, 1], [1, 1], [0, 1], [1, 0], [1, 1], [1, 0], [0, 0]] + ) + clf = KNeighborsClassifier(n_neighbors=1) + scoring_micro = make_scorer(precision_score, average="micro") + scoring_macro = make_scorer(precision_score, average="macro") + scoring_samples = make_scorer(precision_score, average="samples") + score_micro = cross_val_score(clf, X, y, scoring=scoring_micro) + score_macro = cross_val_score(clf, X, y, scoring=scoring_macro) + score_samples = cross_val_score(clf, X, y, scoring=scoring_samples) + assert_almost_equal(score_micro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 3]) + assert_almost_equal(score_macro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4]) + assert_almost_equal(score_samples, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4]) + + +@pytest.mark.parametrize("coo_container", COO_CONTAINERS) +def test_cross_val_predict(coo_container): + X, y = load_diabetes(return_X_y=True) + cv = KFold() + + est = Ridge() + + # Naive loop (should be same as cross_val_predict): + preds2 = np.zeros_like(y) + for train, test in cv.split(X, y): + est.fit(X[train], y[train]) + preds2[test] = est.predict(X[test]) + + preds = cross_val_predict(est, X, y, cv=cv) + assert_array_almost_equal(preds, preds2) + + preds = cross_val_predict(est, X, y) + assert len(preds) == len(y) + + cv = LeaveOneOut() + preds = cross_val_predict(est, X, y, cv=cv) + assert len(preds) == len(y) + + Xsp = X.copy() + Xsp *= Xsp > np.median(Xsp) + 
Xsp = coo_container(Xsp) + preds = cross_val_predict(est, Xsp, y) + assert_array_almost_equal(len(preds), len(y)) + + preds = cross_val_predict(KMeans(n_init="auto"), X) + assert len(preds) == len(y) + + class BadCV: + def split(self, X, y=None, groups=None): + for i in range(4): + yield np.array([0, 1, 2, 3]), np.array([4, 5, 6, 7, 8]) + + with pytest.raises(ValueError): + cross_val_predict(est, X, y, cv=BadCV()) + + X, y = load_iris(return_X_y=True) + + warning_message = ( + r"Number of classes in training fold \(2\) does " + r"not match total number of classes \(3\). " + "Results may not be appropriate for your use case." + ) + with pytest.warns(RuntimeWarning, match=warning_message): + cross_val_predict( + LogisticRegression(solver="liblinear"), + X, + y, + method="predict_proba", + cv=KFold(2), + ) + + +def test_cross_val_predict_decision_function_shape(): + X, y = make_classification(n_classes=2, n_samples=50, random_state=0) + + preds = cross_val_predict( + LogisticRegression(solver="liblinear"), X, y, method="decision_function" + ) + assert preds.shape == (50,) + + X, y = load_iris(return_X_y=True) + + preds = cross_val_predict( + LogisticRegression(solver="liblinear"), X, y, method="decision_function" + ) + assert preds.shape == (150, 3) + + # This specifically tests imbalanced splits for binary + # classification with decision_function. This is only + # applicable to classifiers that can be fit on a single + # class. + X = X[:100] + y = y[:100] + error_message = ( + "Only 1 class/es in training fold," + " but 2 in overall dataset. This" + " is not supported for decision_function" + " with imbalanced folds. To fix " + "this, use a cross-validation technique " + "resulting in properly stratified folds" + ) + with pytest.raises(ValueError, match=error_message): + cross_val_predict( + RidgeClassifier(), X, y, method="decision_function", cv=KFold(2) + ) + + X, y = load_digits(return_X_y=True) + est = SVC(kernel="linear", decision_function_shape="ovo") + + preds = cross_val_predict(est, X, y, method="decision_function") + assert preds.shape == (1797, 45) + + ind = np.argsort(y) + X, y = X[ind], y[ind] + error_message_regexp = ( + r"Output shape \(599L?, 21L?\) of " + "decision_function does not match number of " + r"classes \(7\) in fold. 
Irregular " + "decision_function .*" + ) + with pytest.raises(ValueError, match=error_message_regexp): + cross_val_predict(est, X, y, cv=KFold(n_splits=3), method="decision_function") + + +def test_cross_val_predict_predict_proba_shape(): + X, y = make_classification(n_classes=2, n_samples=50, random_state=0) + + preds = cross_val_predict( + LogisticRegression(solver="liblinear"), X, y, method="predict_proba" + ) + assert preds.shape == (50, 2) + + X, y = load_iris(return_X_y=True) + + preds = cross_val_predict( + LogisticRegression(solver="liblinear"), X, y, method="predict_proba" + ) + assert preds.shape == (150, 3) + + +def test_cross_val_predict_predict_log_proba_shape(): + X, y = make_classification(n_classes=2, n_samples=50, random_state=0) + + preds = cross_val_predict( + LogisticRegression(solver="liblinear"), X, y, method="predict_log_proba" + ) + assert preds.shape == (50, 2) + + X, y = load_iris(return_X_y=True) + + preds = cross_val_predict( + LogisticRegression(solver="liblinear"), X, y, method="predict_log_proba" + ) + assert preds.shape == (150, 3) + + +@pytest.mark.parametrize("coo_container", COO_CONTAINERS) +def test_cross_val_predict_input_types(coo_container): + iris = load_iris() + X, y = iris.data, iris.target + X_sparse = coo_container(X) + multioutput_y = np.column_stack([y, y[::-1]]) + + clf = Ridge(fit_intercept=False, random_state=0) + # 3 fold cv is used --> at least 3 samples per class + # Smoke test + predictions = cross_val_predict(clf, X, y) + assert predictions.shape == (150,) + + # test with multioutput y + predictions = cross_val_predict(clf, X_sparse, multioutput_y) + assert predictions.shape == (150, 2) + + predictions = cross_val_predict(clf, X_sparse, y) + assert_array_equal(predictions.shape, (150,)) + + # test with multioutput y + predictions = cross_val_predict(clf, X_sparse, multioutput_y) + assert_array_equal(predictions.shape, (150, 2)) + + # test with X and y as list + list_check = lambda x: isinstance(x, list) + clf = CheckingClassifier(check_X=list_check) + predictions = cross_val_predict(clf, X.tolist(), y.tolist()) + + clf = CheckingClassifier(check_y=list_check) + predictions = cross_val_predict(clf, X, y.tolist()) + + # test with X and y as list and non empty method + predictions = cross_val_predict( + LogisticRegression(solver="liblinear"), + X.tolist(), + y.tolist(), + method="decision_function", + ) + predictions = cross_val_predict( + LogisticRegression(solver="liblinear"), + X, + y.tolist(), + method="decision_function", + ) + + # test with 3d X and + X_3d = X[:, :, np.newaxis] + check_3d = lambda x: x.ndim == 3 + clf = CheckingClassifier(check_X=check_3d) + predictions = cross_val_predict(clf, X_3d, y) + assert_array_equal(predictions.shape, (150,)) + + +@pytest.mark.filterwarnings("ignore: Using or importing the ABCs from") +# python3.7 deprecation warnings in pandas via matplotlib :-/ +def test_cross_val_predict_pandas(): + # check cross_val_score doesn't destroy pandas dataframe + types = [(MockDataFrame, MockDataFrame)] + try: + from pandas import DataFrame, Series + + types.append((Series, DataFrame)) + except ImportError: + pass + for TargetType, InputFeatureType in types: + # X dataframe, y series + X_df, y_ser = InputFeatureType(X), TargetType(y2) + check_df = lambda x: isinstance(x, InputFeatureType) + check_series = lambda x: isinstance(x, TargetType) + clf = CheckingClassifier(check_X=check_df, check_y=check_series) + cross_val_predict(clf, X_df, y_ser, cv=3) + + +def test_cross_val_predict_unbalanced(): + X, y = 
make_classification( + n_samples=100, + n_features=2, + n_redundant=0, + n_informative=2, + n_clusters_per_class=1, + random_state=1, + ) + # Change the first sample to a new class + y[0] = 2 + clf = LogisticRegression(random_state=1, solver="liblinear") + cv = StratifiedKFold(n_splits=2) + train, test = list(cv.split(X, y)) + yhat_proba = cross_val_predict(clf, X, y, cv=cv, method="predict_proba") + assert y[test[0]][0] == 2 # sanity check for further assertions + assert np.all(yhat_proba[test[0]][:, 2] == 0) + assert np.all(yhat_proba[test[0]][:, 0:1] > 0) + assert np.all(yhat_proba[test[1]] > 0) + assert_array_almost_equal(yhat_proba.sum(axis=1), np.ones(y.shape), decimal=12) + + +def test_cross_val_predict_y_none(): + # ensure that cross_val_predict works when y is None + mock_classifier = MockClassifier() + rng = np.random.RandomState(42) + X = rng.rand(100, 10) + y_hat = cross_val_predict(mock_classifier, X, y=None, cv=5, method="predict") + assert_allclose(X[:, 0], y_hat) + y_hat_proba = cross_val_predict( + mock_classifier, X, y=None, cv=5, method="predict_proba" + ) + assert_allclose(X, y_hat_proba) + + +@pytest.mark.parametrize("coo_container", COO_CONTAINERS) +def test_cross_val_score_sparse_fit_params(coo_container): + iris = load_iris() + X, y = iris.data, iris.target + clf = MockClassifier() + fit_params = {"sparse_sample_weight": coo_container(np.eye(X.shape[0]))} + a = cross_val_score(clf, X, y, params=fit_params, cv=3) + assert_array_equal(a, np.ones(3)) + + +def test_learning_curve(): + n_samples = 30 + n_splits = 3 + X, y = make_classification( + n_samples=n_samples, + n_features=1, + n_informative=1, + n_redundant=0, + n_classes=2, + n_clusters_per_class=1, + random_state=0, + ) + estimator = MockImprovingEstimator(n_samples * ((n_splits - 1) / n_splits)) + for shuffle_train in [False, True]: + with warnings.catch_warnings(record=True) as w: + ( + train_sizes, + train_scores, + test_scores, + fit_times, + score_times, + ) = learning_curve( + estimator, + X, + y, + cv=KFold(n_splits=n_splits), + train_sizes=np.linspace(0.1, 1.0, 10), + shuffle=shuffle_train, + return_times=True, + ) + if len(w) > 0: + raise RuntimeError("Unexpected warning: %r" % w[0].message) + assert train_scores.shape == (10, 3) + assert test_scores.shape == (10, 3) + assert fit_times.shape == (10, 3) + assert score_times.shape == (10, 3) + assert_array_equal(train_sizes, np.linspace(2, 20, 10)) + assert_array_almost_equal(train_scores.mean(axis=1), np.linspace(1.9, 1.0, 10)) + assert_array_almost_equal(test_scores.mean(axis=1), np.linspace(0.1, 1.0, 10)) + + # Cannot use assert_array_almost_equal for fit and score times because + # the values are hardware-dependant + assert fit_times.dtype == "float64" + assert score_times.dtype == "float64" + + # Test a custom cv splitter that can iterate only once + with warnings.catch_warnings(record=True) as w: + train_sizes2, train_scores2, test_scores2 = learning_curve( + estimator, + X, + y, + cv=OneTimeSplitter(n_splits=n_splits, n_samples=n_samples), + train_sizes=np.linspace(0.1, 1.0, 10), + shuffle=shuffle_train, + ) + if len(w) > 0: + raise RuntimeError("Unexpected warning: %r" % w[0].message) + assert_array_almost_equal(train_scores2, train_scores) + assert_array_almost_equal(test_scores2, test_scores) + + +def test_learning_curve_unsupervised(): + X, _ = make_classification( + n_samples=30, + n_features=1, + n_informative=1, + n_redundant=0, + n_classes=2, + n_clusters_per_class=1, + random_state=0, + ) + estimator = MockImprovingEstimator(20) + 
train_sizes, train_scores, test_scores = learning_curve( + estimator, X, y=None, cv=3, train_sizes=np.linspace(0.1, 1.0, 10) + ) + assert_array_equal(train_sizes, np.linspace(2, 20, 10)) + assert_array_almost_equal(train_scores.mean(axis=1), np.linspace(1.9, 1.0, 10)) + assert_array_almost_equal(test_scores.mean(axis=1), np.linspace(0.1, 1.0, 10)) + + +def test_learning_curve_verbose(): + X, y = make_classification( + n_samples=30, + n_features=1, + n_informative=1, + n_redundant=0, + n_classes=2, + n_clusters_per_class=1, + random_state=0, + ) + estimator = MockImprovingEstimator(20) + + old_stdout = sys.stdout + sys.stdout = StringIO() + try: + train_sizes, train_scores, test_scores = learning_curve( + estimator, X, y, cv=3, verbose=1 + ) + finally: + out = sys.stdout.getvalue() + sys.stdout.close() + sys.stdout = old_stdout + + assert "[learning_curve]" in out + + +def test_learning_curve_incremental_learning_not_possible(): + X, y = make_classification( + n_samples=2, + n_features=1, + n_informative=1, + n_redundant=0, + n_classes=2, + n_clusters_per_class=1, + random_state=0, + ) + # The mockup does not have partial_fit() + estimator = MockImprovingEstimator(1) + with pytest.raises(ValueError): + learning_curve(estimator, X, y, exploit_incremental_learning=True) + + +def test_learning_curve_incremental_learning(): + X, y = make_classification( + n_samples=30, + n_features=1, + n_informative=1, + n_redundant=0, + n_classes=2, + n_clusters_per_class=1, + random_state=0, + ) + estimator = MockIncrementalImprovingEstimator(20) + for shuffle_train in [False, True]: + train_sizes, train_scores, test_scores = learning_curve( + estimator, + X, + y, + cv=3, + exploit_incremental_learning=True, + train_sizes=np.linspace(0.1, 1.0, 10), + shuffle=shuffle_train, + ) + assert_array_equal(train_sizes, np.linspace(2, 20, 10)) + assert_array_almost_equal(train_scores.mean(axis=1), np.linspace(1.9, 1.0, 10)) + assert_array_almost_equal(test_scores.mean(axis=1), np.linspace(0.1, 1.0, 10)) + + +def test_learning_curve_incremental_learning_unsupervised(): + X, _ = make_classification( + n_samples=30, + n_features=1, + n_informative=1, + n_redundant=0, + n_classes=2, + n_clusters_per_class=1, + random_state=0, + ) + estimator = MockIncrementalImprovingEstimator(20) + train_sizes, train_scores, test_scores = learning_curve( + estimator, + X, + y=None, + cv=3, + exploit_incremental_learning=True, + train_sizes=np.linspace(0.1, 1.0, 10), + ) + assert_array_equal(train_sizes, np.linspace(2, 20, 10)) + assert_array_almost_equal(train_scores.mean(axis=1), np.linspace(1.9, 1.0, 10)) + assert_array_almost_equal(test_scores.mean(axis=1), np.linspace(0.1, 1.0, 10)) + + +def test_learning_curve_batch_and_incremental_learning_are_equal(): + X, y = make_classification( + n_samples=30, + n_features=1, + n_informative=1, + n_redundant=0, + n_classes=2, + n_clusters_per_class=1, + random_state=0, + ) + train_sizes = np.linspace(0.2, 1.0, 5) + estimator = PassiveAggressiveClassifier(max_iter=1, tol=None, shuffle=False) + + train_sizes_inc, train_scores_inc, test_scores_inc = learning_curve( + estimator, + X, + y, + train_sizes=train_sizes, + cv=3, + exploit_incremental_learning=True, + ) + train_sizes_batch, train_scores_batch, test_scores_batch = learning_curve( + estimator, + X, + y, + cv=3, + train_sizes=train_sizes, + exploit_incremental_learning=False, + ) + + assert_array_equal(train_sizes_inc, train_sizes_batch) + assert_array_almost_equal( + train_scores_inc.mean(axis=1), train_scores_batch.mean(axis=1) + ) + 
assert_array_almost_equal( + test_scores_inc.mean(axis=1), test_scores_batch.mean(axis=1) + ) + + +def test_learning_curve_n_sample_range_out_of_bounds(): + X, y = make_classification( + n_samples=30, + n_features=1, + n_informative=1, + n_redundant=0, + n_classes=2, + n_clusters_per_class=1, + random_state=0, + ) + estimator = MockImprovingEstimator(20) + with pytest.raises(ValueError): + learning_curve(estimator, X, y, cv=3, train_sizes=[0, 1]) + with pytest.raises(ValueError): + learning_curve(estimator, X, y, cv=3, train_sizes=[0.0, 1.0]) + with pytest.raises(ValueError): + learning_curve(estimator, X, y, cv=3, train_sizes=[0.1, 1.1]) + with pytest.raises(ValueError): + learning_curve(estimator, X, y, cv=3, train_sizes=[0, 20]) + with pytest.raises(ValueError): + learning_curve(estimator, X, y, cv=3, train_sizes=[1, 21]) + + +def test_learning_curve_remove_duplicate_sample_sizes(): + X, y = make_classification( + n_samples=3, + n_features=1, + n_informative=1, + n_redundant=0, + n_classes=2, + n_clusters_per_class=1, + random_state=0, + ) + estimator = MockImprovingEstimator(2) + warning_message = ( + "Removed duplicate entries from 'train_sizes'. Number of ticks " + "will be less than the size of 'train_sizes': 2 instead of 3." + ) + with pytest.warns(RuntimeWarning, match=warning_message): + train_sizes, _, _ = learning_curve( + estimator, X, y, cv=3, train_sizes=np.linspace(0.33, 1.0, 3) + ) + assert_array_equal(train_sizes, [1, 2]) + + +def test_learning_curve_with_boolean_indices(): + X, y = make_classification( + n_samples=30, + n_features=1, + n_informative=1, + n_redundant=0, + n_classes=2, + n_clusters_per_class=1, + random_state=0, + ) + estimator = MockImprovingEstimator(20) + cv = KFold(n_splits=3) + train_sizes, train_scores, test_scores = learning_curve( + estimator, X, y, cv=cv, train_sizes=np.linspace(0.1, 1.0, 10) + ) + assert_array_equal(train_sizes, np.linspace(2, 20, 10)) + assert_array_almost_equal(train_scores.mean(axis=1), np.linspace(1.9, 1.0, 10)) + assert_array_almost_equal(test_scores.mean(axis=1), np.linspace(0.1, 1.0, 10)) + + +def test_learning_curve_with_shuffle(): + # Following test case was designed this way to verify the code + # changes made in pull request: #7506. + X = np.array( + [ + [1, 2], + [3, 4], + [5, 6], + [7, 8], + [11, 12], + [13, 14], + [15, 16], + [17, 18], + [19, 20], + [7, 8], + [9, 10], + [11, 12], + [13, 14], + [15, 16], + [17, 18], + ] + ) + y = np.array([1, 1, 1, 2, 3, 4, 1, 1, 2, 3, 4, 1, 2, 3, 4]) + groups = np.array([1, 1, 1, 1, 1, 1, 3, 3, 3, 3, 3, 4, 4, 4, 4]) + # Splits on these groups fail without shuffle as the first iteration + # of the learning curve doesn't contain label 4 in the training set. 
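+ # With shuffle=True and a fixed random_state, the group-aware splits are
+ # reproducible, which is what keeps the exact mean scores asserted below stable.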
+ estimator = PassiveAggressiveClassifier(max_iter=5, tol=None, shuffle=False) + + cv = GroupKFold(n_splits=2) + train_sizes_batch, train_scores_batch, test_scores_batch = learning_curve( + estimator, + X, + y, + cv=cv, + n_jobs=1, + train_sizes=np.linspace(0.3, 1.0, 3), + groups=groups, + shuffle=True, + random_state=2, + ) + assert_array_almost_equal( + train_scores_batch.mean(axis=1), np.array([0.75, 0.3, 0.36111111]) + ) + assert_array_almost_equal( + test_scores_batch.mean(axis=1), np.array([0.36111111, 0.25, 0.25]) + ) + with pytest.raises(ValueError): + learning_curve( + estimator, + X, + y, + cv=cv, + n_jobs=1, + train_sizes=np.linspace(0.3, 1.0, 3), + groups=groups, + error_score="raise", + ) + + train_sizes_inc, train_scores_inc, test_scores_inc = learning_curve( + estimator, + X, + y, + cv=cv, + n_jobs=1, + train_sizes=np.linspace(0.3, 1.0, 3), + groups=groups, + shuffle=True, + random_state=2, + exploit_incremental_learning=True, + ) + assert_array_almost_equal( + train_scores_inc.mean(axis=1), train_scores_batch.mean(axis=1) + ) + assert_array_almost_equal( + test_scores_inc.mean(axis=1), test_scores_batch.mean(axis=1) + ) + + +def test_learning_curve_fit_params(): + X = np.arange(100).reshape(10, 10) + y = np.array([0] * 5 + [1] * 5) + clf = CheckingClassifier(expected_sample_weight=True) + + err_msg = r"Expected sample_weight to be passed" + with pytest.raises(AssertionError, match=err_msg): + learning_curve(clf, X, y, error_score="raise") + + err_msg = r"sample_weight.shape == \(1,\), expected \(2,\)!" + with pytest.raises(ValueError, match=err_msg): + learning_curve( + clf, X, y, error_score="raise", fit_params={"sample_weight": np.ones(1)} + ) + learning_curve( + clf, X, y, error_score="raise", fit_params={"sample_weight": np.ones(10)} + ) + + +def test_learning_curve_incremental_learning_fit_params(): + X, y = make_classification( + n_samples=30, + n_features=1, + n_informative=1, + n_redundant=0, + n_classes=2, + n_clusters_per_class=1, + random_state=0, + ) + estimator = MockIncrementalImprovingEstimator(20, ["sample_weight"]) + err_msg = r"Expected fit parameter\(s\) \['sample_weight'\] not seen." 
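+ # Without fit_params, the expected 'sample_weight' never reaches the estimator,
+ # so the call below is expected to raise the assertion error matched above.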
+ with pytest.raises(AssertionError, match=err_msg): + learning_curve( + estimator, + X, + y, + cv=3, + exploit_incremental_learning=True, + train_sizes=np.linspace(0.1, 1.0, 10), + error_score="raise", + ) + + err_msg = "Fit parameter sample_weight has length 3; expected" + with pytest.raises(AssertionError, match=err_msg): + learning_curve( + estimator, + X, + y, + cv=3, + exploit_incremental_learning=True, + train_sizes=np.linspace(0.1, 1.0, 10), + error_score="raise", + fit_params={"sample_weight": np.ones(3)}, + ) + + learning_curve( + estimator, + X, + y, + cv=3, + exploit_incremental_learning=True, + train_sizes=np.linspace(0.1, 1.0, 10), + error_score="raise", + fit_params={"sample_weight": np.ones(2)}, + ) + + +def test_validation_curve(): + X, y = make_classification( + n_samples=2, + n_features=1, + n_informative=1, + n_redundant=0, + n_classes=2, + n_clusters_per_class=1, + random_state=0, + ) + param_range = np.linspace(0, 1, 10) + with warnings.catch_warnings(record=True) as w: + train_scores, test_scores = validation_curve( + MockEstimatorWithParameter(), + X, + y, + param_name="param", + param_range=param_range, + cv=2, + ) + if len(w) > 0: + raise RuntimeError("Unexpected warning: %r" % w[0].message) + + assert_array_almost_equal(train_scores.mean(axis=1), param_range) + assert_array_almost_equal(test_scores.mean(axis=1), 1 - param_range) + + +def test_validation_curve_clone_estimator(): + X, y = make_classification( + n_samples=2, + n_features=1, + n_informative=1, + n_redundant=0, + n_classes=2, + n_clusters_per_class=1, + random_state=0, + ) + + param_range = np.linspace(1, 0, 10) + _, _ = validation_curve( + MockEstimatorWithSingleFitCallAllowed(), + X, + y, + param_name="param", + param_range=param_range, + cv=2, + ) + + +def test_validation_curve_cv_splits_consistency(): + n_samples = 100 + n_splits = 5 + X, y = make_classification(n_samples=100, random_state=0) + + scores1 = validation_curve( + SVC(kernel="linear", random_state=0), + X, + y, + param_name="C", + param_range=[0.1, 0.1, 0.2, 0.2], + cv=OneTimeSplitter(n_splits=n_splits, n_samples=n_samples), + ) + # The OneTimeSplitter is a non-re-entrant cv splitter. Unless, the + # `split` is called for each parameter, the following should produce + # identical results for param setting 1 and param setting 2 as both have + # the same C value. + assert_array_almost_equal(*np.vsplit(np.hstack(scores1)[(0, 2, 1, 3), :], 2)) + + scores2 = validation_curve( + SVC(kernel="linear", random_state=0), + X, + y, + param_name="C", + param_range=[0.1, 0.1, 0.2, 0.2], + cv=KFold(n_splits=n_splits, shuffle=True), + ) + + # For scores2, compare the 1st and 2nd parameter's scores + # (Since the C value for 1st two param setting is 0.1, they must be + # consistent unless the train test folds differ between the param settings) + assert_array_almost_equal(*np.vsplit(np.hstack(scores2)[(0, 2, 1, 3), :], 2)) + + scores3 = validation_curve( + SVC(kernel="linear", random_state=0), + X, + y, + param_name="C", + param_range=[0.1, 0.1, 0.2, 0.2], + cv=KFold(n_splits=n_splits), + ) + + # OneTimeSplitter is basically unshuffled KFold(n_splits=5). Sanity check. 
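+ # Both splitters therefore yield identical folds, so the full score arrays should match.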
+ assert_array_almost_equal(np.array(scores3), np.array(scores1)) + + +def test_validation_curve_fit_params(): + X = np.arange(100).reshape(10, 10) + y = np.array([0] * 5 + [1] * 5) + clf = CheckingClassifier(expected_sample_weight=True) + + err_msg = r"Expected sample_weight to be passed" + with pytest.raises(AssertionError, match=err_msg): + validation_curve( + clf, + X, + y, + param_name="foo_param", + param_range=[1, 2, 3], + error_score="raise", + ) + + err_msg = r"sample_weight.shape == \(1,\), expected \(8,\)!" + with pytest.raises(ValueError, match=err_msg): + validation_curve( + clf, + X, + y, + param_name="foo_param", + param_range=[1, 2, 3], + error_score="raise", + fit_params={"sample_weight": np.ones(1)}, + ) + validation_curve( + clf, + X, + y, + param_name="foo_param", + param_range=[1, 2, 3], + error_score="raise", + fit_params={"sample_weight": np.ones(10)}, + ) + + +def test_check_is_permutation(): + rng = np.random.RandomState(0) + p = np.arange(100) + rng.shuffle(p) + assert _check_is_permutation(p, 100) + assert not _check_is_permutation(np.delete(p, 23), 100) + + p[0] = 23 + assert not _check_is_permutation(p, 100) + + # Check if the additional duplicate indices are caught + assert not _check_is_permutation(np.hstack((p, 0)), 100) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_cross_val_predict_sparse_prediction(csr_container): + # check that cross_val_predict gives same result for sparse and dense input + X, y = make_multilabel_classification( + n_classes=2, + n_labels=1, + allow_unlabeled=False, + return_indicator=True, + random_state=1, + ) + X_sparse = csr_container(X) + y_sparse = csr_container(y) + classif = OneVsRestClassifier(SVC(kernel="linear")) + preds = cross_val_predict(classif, X, y, cv=10) + preds_sparse = cross_val_predict(classif, X_sparse, y_sparse, cv=10) + preds_sparse = preds_sparse.toarray() + assert_array_almost_equal(preds_sparse, preds) + + +def check_cross_val_predict_binary(est, X, y, method): + """Helper for tests of cross_val_predict with binary classification""" + cv = KFold(n_splits=3, shuffle=False) + + # Generate expected outputs + if y.ndim == 1: + exp_shape = (len(X),) if method == "decision_function" else (len(X), 2) + else: + exp_shape = y.shape + expected_predictions = np.zeros(exp_shape) + for train, test in cv.split(X, y): + est = clone(est).fit(X[train], y[train]) + expected_predictions[test] = getattr(est, method)(X[test]) + + # Check actual outputs for several representations of y + for tg in [y, y + 1, y - 2, y.astype("str")]: + assert_allclose( + cross_val_predict(est, X, tg, method=method, cv=cv), expected_predictions + ) + + +def check_cross_val_predict_multiclass(est, X, y, method): + """Helper for tests of cross_val_predict with multiclass classification""" + cv = KFold(n_splits=3, shuffle=False) + + # Generate expected outputs + float_min = np.finfo(np.float64).min + default_values = { + "decision_function": float_min, + "predict_log_proba": float_min, + "predict_proba": 0, + } + expected_predictions = np.full( + (len(X), len(set(y))), default_values[method], dtype=np.float64 + ) + _, y_enc = np.unique(y, return_inverse=True) + for train, test in cv.split(X, y_enc): + est = clone(est).fit(X[train], y_enc[train]) + fold_preds = getattr(est, method)(X[test]) + i_cols_fit = np.unique(y_enc[train]) + expected_predictions[np.ix_(test, i_cols_fit)] = fold_preds + + # Check actual outputs for several representations of y + for tg in [y, y + 1, y - 2, y.astype("str")]: + assert_allclose( + 
cross_val_predict(est, X, tg, method=method, cv=cv), expected_predictions + ) + + +def check_cross_val_predict_multilabel(est, X, y, method): + """Check the output of cross_val_predict for 2D targets using + Estimators which provide a predictions as a list with one + element per class. + """ + cv = KFold(n_splits=3, shuffle=False) + + # Create empty arrays of the correct size to hold outputs + float_min = np.finfo(np.float64).min + default_values = { + "decision_function": float_min, + "predict_log_proba": float_min, + "predict_proba": 0, + } + n_targets = y.shape[1] + expected_preds = [] + for i_col in range(n_targets): + n_classes_in_label = len(set(y[:, i_col])) + if n_classes_in_label == 2 and method == "decision_function": + exp_shape = (len(X),) + else: + exp_shape = (len(X), n_classes_in_label) + expected_preds.append( + np.full(exp_shape, default_values[method], dtype=np.float64) + ) + + # Generate expected outputs + y_enc_cols = [ + np.unique(y[:, i], return_inverse=True)[1][:, np.newaxis] + for i in range(y.shape[1]) + ] + y_enc = np.concatenate(y_enc_cols, axis=1) + for train, test in cv.split(X, y_enc): + est = clone(est).fit(X[train], y_enc[train]) + fold_preds = getattr(est, method)(X[test]) + for i_col in range(n_targets): + fold_cols = np.unique(y_enc[train][:, i_col]) + if expected_preds[i_col].ndim == 1: + # Decision function with <=2 classes + expected_preds[i_col][test] = fold_preds[i_col] + else: + idx = np.ix_(test, fold_cols) + expected_preds[i_col][idx] = fold_preds[i_col] + + # Check actual outputs for several representations of y + for tg in [y, y + 1, y - 2, y.astype("str")]: + cv_predict_output = cross_val_predict(est, X, tg, method=method, cv=cv) + assert len(cv_predict_output) == len(expected_preds) + for i in range(len(cv_predict_output)): + assert_allclose(cv_predict_output[i], expected_preds[i]) + + +def check_cross_val_predict_with_method_binary(est): + # This test includes the decision_function with two classes. + # This is a special case: it has only one column of output. + X, y = make_classification(n_classes=2, random_state=0) + for method in ["decision_function", "predict_proba", "predict_log_proba"]: + check_cross_val_predict_binary(est, X, y, method) + + +def check_cross_val_predict_with_method_multiclass(est): + iris = load_iris() + X, y = iris.data, iris.target + X, y = shuffle(X, y, random_state=0) + for method in ["decision_function", "predict_proba", "predict_log_proba"]: + check_cross_val_predict_multiclass(est, X, y, method) + + +def test_cross_val_predict_with_method(): + check_cross_val_predict_with_method_binary(LogisticRegression(solver="liblinear")) + check_cross_val_predict_with_method_multiclass( + LogisticRegression(solver="liblinear") + ) + + +def test_cross_val_predict_method_checking(): + # Regression test for issue #9639. Tests that cross_val_predict does not + # check estimator methods (e.g. 
predict_proba) before fitting + iris = load_iris() + X, y = iris.data, iris.target + X, y = shuffle(X, y, random_state=0) + for method in ["decision_function", "predict_proba", "predict_log_proba"]: + est = SGDClassifier(loss="log_loss", random_state=2) + check_cross_val_predict_multiclass(est, X, y, method) + + +def test_gridsearchcv_cross_val_predict_with_method(): + iris = load_iris() + X, y = iris.data, iris.target + X, y = shuffle(X, y, random_state=0) + est = GridSearchCV( + LogisticRegression(random_state=42, solver="liblinear"), {"C": [0.1, 1]}, cv=2 + ) + for method in ["decision_function", "predict_proba", "predict_log_proba"]: + check_cross_val_predict_multiclass(est, X, y, method) + + +def test_cross_val_predict_with_method_multilabel_ovr(): + # OVR does multilabel predictions, but only arrays of + # binary indicator columns. The output of predict_proba + # is a 2D array with shape (n_samples, n_classes). + n_samp = 100 + n_classes = 4 + X, y = make_multilabel_classification( + n_samples=n_samp, n_labels=3, n_classes=n_classes, n_features=5, random_state=42 + ) + est = OneVsRestClassifier(LogisticRegression(solver="liblinear", random_state=0)) + for method in ["predict_proba", "decision_function"]: + check_cross_val_predict_binary(est, X, y, method=method) + + +class RFWithDecisionFunction(RandomForestClassifier): + # None of the current multioutput-multiclass estimators have + # decision function methods. Create a mock decision function + # to test the cross_val_predict function's handling of this case. + def decision_function(self, X): + probs = self.predict_proba(X) + msg = "This helper should only be used on multioutput-multiclass tasks" + assert isinstance(probs, list), msg + probs = [p[:, -1] if p.shape[1] == 2 else p for p in probs] + return probs + + +def test_cross_val_predict_with_method_multilabel_rf(): + # The RandomForest allows multiple classes in each label. + # Output of predict_proba is a list of outputs of predict_proba + # for each individual label. + n_classes = 4 + X, y = make_multilabel_classification( + n_samples=100, n_labels=3, n_classes=n_classes, n_features=5, random_state=42 + ) + y[:, 0] += y[:, 1] # Put three classes in the first column + for method in ["predict_proba", "predict_log_proba", "decision_function"]: + est = RFWithDecisionFunction(n_estimators=5, random_state=0) + with warnings.catch_warnings(): + # Suppress "RuntimeWarning: divide by zero encountered in log" + warnings.simplefilter("ignore") + check_cross_val_predict_multilabel(est, X, y, method=method) + + +def test_cross_val_predict_with_method_rare_class(): + # Test a multiclass problem where one class will be missing from + # one of the CV training sets. + rng = np.random.RandomState(0) + X = rng.normal(0, 1, size=(14, 10)) + y = np.array([0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 3]) + est = LogisticRegression(solver="liblinear") + for method in ["predict_proba", "predict_log_proba", "decision_function"]: + with warnings.catch_warnings(): + # Suppress warning about too few examples of a class + warnings.simplefilter("ignore") + check_cross_val_predict_multiclass(est, X, y, method) + + +def test_cross_val_predict_with_method_multilabel_rf_rare_class(): + # The RandomForest allows anything for the contents of the labels. + # Output of predict_proba is a list of outputs of predict_proba + # for each individual label. + # In this test, the first label has a class with a single example. + # We'll have one CV fold where the training data don't include it. 
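+ # check_cross_val_predict_multilabel fills the columns of the missing class with
+ # the method-specific default value (0 or float_min), so the prediction shapes
+ # stay consistent across folds even when a class is absent from the training data.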
+ rng = np.random.RandomState(0) + X = rng.normal(0, 1, size=(5, 10)) + y = np.array([[0, 0], [1, 1], [2, 1], [0, 1], [1, 0]]) + for method in ["predict_proba", "predict_log_proba"]: + est = RFWithDecisionFunction(n_estimators=5, random_state=0) + with warnings.catch_warnings(): + # Suppress "RuntimeWarning: divide by zero encountered in log" + warnings.simplefilter("ignore") + check_cross_val_predict_multilabel(est, X, y, method=method) + + +def get_expected_predictions(X, y, cv, classes, est, method): + expected_predictions = np.zeros([len(y), classes]) + func = getattr(est, method) + + for train, test in cv.split(X, y): + est.fit(X[train], y[train]) + expected_predictions_ = func(X[test]) + # To avoid 2 dimensional indexing + if method == "predict_proba": + exp_pred_test = np.zeros((len(test), classes)) + else: + exp_pred_test = np.full( + (len(test), classes), np.finfo(expected_predictions.dtype).min + ) + exp_pred_test[:, est.classes_] = expected_predictions_ + expected_predictions[test] = exp_pred_test + + return expected_predictions + + +def test_cross_val_predict_class_subset(): + X = np.arange(200).reshape(100, 2) + y = np.array([x // 10 for x in range(100)]) + classes = 10 + + kfold3 = KFold(n_splits=3) + kfold4 = KFold(n_splits=4) + + le = LabelEncoder() + + methods = ["decision_function", "predict_proba", "predict_log_proba"] + for method in methods: + est = LogisticRegression(solver="liblinear") + + # Test with n_splits=3 + predictions = cross_val_predict(est, X, y, method=method, cv=kfold3) + + # Runs a naive loop (should be same as cross_val_predict): + expected_predictions = get_expected_predictions( + X, y, kfold3, classes, est, method + ) + assert_array_almost_equal(expected_predictions, predictions) + + # Test with n_splits=4 + predictions = cross_val_predict(est, X, y, method=method, cv=kfold4) + expected_predictions = get_expected_predictions( + X, y, kfold4, classes, est, method + ) + assert_array_almost_equal(expected_predictions, predictions) + + # Testing unordered labels + y = shuffle(np.repeat(range(10), 10), random_state=0) + predictions = cross_val_predict(est, X, y, method=method, cv=kfold3) + y = le.fit_transform(y) + expected_predictions = get_expected_predictions( + X, y, kfold3, classes, est, method + ) + assert_array_almost_equal(expected_predictions, predictions) + + +def test_score_memmap(): + # Ensure a scalar score of memmap type is accepted + iris = load_iris() + X, y = iris.data, iris.target + clf = MockClassifier() + tf = tempfile.NamedTemporaryFile(mode="wb", delete=False) + tf.write(b"Hello world!!!!!") + tf.close() + scores = np.memmap(tf.name, dtype=np.float64) + score = np.memmap(tf.name, shape=(), mode="r", dtype=np.float64) + try: + cross_val_score(clf, X, y, scoring=lambda est, X, y: score) + with pytest.raises(ValueError): + cross_val_score(clf, X, y, scoring=lambda est, X, y: scores) + finally: + # Best effort to release the mmap file handles before deleting the + # backing file under Windows + scores, score = None, None + for _ in range(3): + try: + os.unlink(tf.name) + break + except OSError: + sleep(1.0) + + +@pytest.mark.filterwarnings("ignore: Using or importing the ABCs from") +def test_permutation_test_score_pandas(): + # check permutation_test_score doesn't destroy pandas dataframe + types = [(MockDataFrame, MockDataFrame)] + try: + from pandas import DataFrame, Series + + types.append((Series, DataFrame)) + except ImportError: + pass + for TargetType, InputFeatureType in types: + # X dataframe, y series + iris = load_iris() + X, 
y = iris.data, iris.target + X_df, y_ser = InputFeatureType(X), TargetType(y) + check_df = lambda x: isinstance(x, InputFeatureType) + check_series = lambda x: isinstance(x, TargetType) + clf = CheckingClassifier(check_X=check_df, check_y=check_series) + permutation_test_score(clf, X_df, y_ser) + + +def test_fit_and_score_failing(): + # Create a failing classifier to deliberately fail + failing_clf = FailingClassifier(FailingClassifier.FAILING_PARAMETER) + # dummy X data + X = np.arange(1, 10) + fit_and_score_args = dict( + estimator=failing_clf, + X=X, + y=None, + scorer=dict(), + train=None, + test=None, + verbose=0, + parameters=None, + fit_params=None, + score_params=None, + ) + # passing error score to trigger the warning message + fit_and_score_args["error_score"] = "raise" + # check if exception was raised, with default error_score='raise' + with pytest.raises(ValueError, match="Failing classifier failed as required"): + _fit_and_score(**fit_and_score_args) + + assert failing_clf.score() == 0.0 # FailingClassifier coverage + + +def test_fit_and_score_working(): + X, y = make_classification(n_samples=30, random_state=0) + clf = SVC(kernel="linear", random_state=0) + train, test = next(ShuffleSplit().split(X)) + # Test return_parameters option + fit_and_score_args = dict( + estimator=clf, + X=X, + y=y, + scorer=dict(), + train=train, + test=test, + verbose=0, + parameters={"max_iter": 100, "tol": 0.1}, + fit_params=None, + score_params=None, + return_parameters=True, + ) + result = _fit_and_score(**fit_and_score_args) + assert result["parameters"] == fit_and_score_args["parameters"] + + +class DataDependentFailingClassifier(BaseEstimator): + def __init__(self, max_x_value=None): + self.max_x_value = max_x_value + + def fit(self, X, y=None): + num_values_too_high = (X > self.max_x_value).sum() + if num_values_too_high: + raise ValueError( + f"Classifier fit failed with {num_values_too_high} values too high" + ) + + def score(self, X=None, Y=None): + return 0.0 + + +@pytest.mark.parametrize("error_score", [np.nan, 0]) +def test_cross_validate_some_failing_fits_warning(error_score): + # Create a failing classifier to deliberately fail + failing_clf = DataDependentFailingClassifier(max_x_value=8) + # dummy X data + X = np.arange(1, 10) + y = np.ones(9) + # passing error score to trigger the warning message + cross_validate_args = [failing_clf, X, y] + cross_validate_kwargs = {"cv": 3, "error_score": error_score} + # check if the warning message type is as expected + + individual_fit_error_message = ( + "ValueError: Classifier fit failed with 1 values too high" + ) + warning_message = re.compile( + ( + "2 fits failed.+total of 3.+The score on these" + " train-test partitions for these parameters will be set to" + f" {cross_validate_kwargs['error_score']}.+{individual_fit_error_message}" + ), + flags=re.DOTALL, + ) + + with pytest.warns(FitFailedWarning, match=warning_message): + cross_validate(*cross_validate_args, **cross_validate_kwargs) + + +@pytest.mark.parametrize("error_score", [np.nan, 0]) +def test_cross_validate_all_failing_fits_error(error_score): + # Create a failing classifier to deliberately fail + failing_clf = FailingClassifier(FailingClassifier.FAILING_PARAMETER) + # dummy X data + X = np.arange(1, 10) + y = np.ones(9) + + cross_validate_args = [failing_clf, X, y] + cross_validate_kwargs = {"cv": 7, "error_score": error_score} + + individual_fit_error_message = "ValueError: Failing classifier failed as required" + error_message = re.compile( + ( + "All the 7 fits 
failed.+your model is misconfigured.+" + f"{individual_fit_error_message}" + ), + flags=re.DOTALL, + ) + + with pytest.raises(ValueError, match=error_message): + cross_validate(*cross_validate_args, **cross_validate_kwargs) + + +def _failing_scorer(estimator, X, y, error_msg): + raise ValueError(error_msg) + + +@pytest.mark.filterwarnings("ignore:lbfgs failed to converge") +@pytest.mark.parametrize("error_score", [np.nan, 0, "raise"]) +def test_cross_val_score_failing_scorer(error_score): + # check that an estimator can fail during scoring in `cross_val_score` and + # that we can optionally replaced it with `error_score` + X, y = load_iris(return_X_y=True) + clf = LogisticRegression(max_iter=5).fit(X, y) + + error_msg = "This scorer is supposed to fail!!!" + failing_scorer = partial(_failing_scorer, error_msg=error_msg) + + if error_score == "raise": + with pytest.raises(ValueError, match=error_msg): + cross_val_score( + clf, X, y, cv=3, scoring=failing_scorer, error_score=error_score + ) + else: + warning_msg = ( + "Scoring failed. The score on this train-test partition for " + f"these parameters will be set to {error_score}" + ) + with pytest.warns(UserWarning, match=warning_msg): + scores = cross_val_score( + clf, X, y, cv=3, scoring=failing_scorer, error_score=error_score + ) + assert_allclose(scores, error_score) + + +@pytest.mark.filterwarnings("ignore:lbfgs failed to converge") +@pytest.mark.parametrize("error_score", [np.nan, 0, "raise"]) +@pytest.mark.parametrize("return_train_score", [True, False]) +@pytest.mark.parametrize("with_multimetric", [False, True]) +def test_cross_validate_failing_scorer( + error_score, return_train_score, with_multimetric +): + # Check that an estimator can fail during scoring in `cross_validate` and + # that we can optionally replace it with `error_score`. In the multimetric + # case also check the result of a non-failing scorer where the other scorers + # are failing. + X, y = load_iris(return_X_y=True) + clf = LogisticRegression(max_iter=5).fit(X, y) + + error_msg = "This scorer is supposed to fail!!!" + failing_scorer = partial(_failing_scorer, error_msg=error_msg) + if with_multimetric: + non_failing_scorer = make_scorer(mean_squared_error) + scoring = { + "score_1": failing_scorer, + "score_2": non_failing_scorer, + "score_3": failing_scorer, + } + else: + scoring = failing_scorer + + if error_score == "raise": + with pytest.raises(ValueError, match=error_msg): + cross_validate( + clf, + X, + y, + cv=3, + scoring=scoring, + return_train_score=return_train_score, + error_score=error_score, + ) + else: + warning_msg = ( + "Scoring failed. The score on this train-test partition for " + f"these parameters will be set to {error_score}" + ) + with pytest.warns(UserWarning, match=warning_msg): + results = cross_validate( + clf, + X, + y, + cv=3, + scoring=scoring, + return_train_score=return_train_score, + error_score=error_score, + ) + for key in results: + if "_score" in key: + if "_score_2" in key: + # check the test (and optionally train) score for the + # scorer that should be non-failing + for i in results[key]: + assert isinstance(i, float) + else: + # check the test (and optionally train) score for all + # scorers that should be assigned to `error_score`. 
+ assert_allclose(results[key], error_score) + + +def three_params_scorer(i, j, k): + return 3.4213 + + +@pytest.mark.parametrize( + "train_score, scorer, verbose, split_prg, cdt_prg, expected", + [ + ( + False, + three_params_scorer, + 2, + (1, 3), + (0, 1), + r"\[CV\] END ...................................................." + r" total time= 0.\ds", + ), + ( + True, + {"sc1": three_params_scorer, "sc2": three_params_scorer}, + 3, + (1, 3), + (0, 1), + r"\[CV 2/3\] END sc1: \(train=3.421, test=3.421\) sc2: " + r"\(train=3.421, test=3.421\) total time= 0.\ds", + ), + ( + False, + {"sc1": three_params_scorer, "sc2": three_params_scorer}, + 10, + (1, 3), + (0, 1), + r"\[CV 2/3; 1/1\] END ....... sc1: \(test=3.421\) sc2: \(test=3.421\)" + r" total time= 0.\ds", + ), + ], +) +def test_fit_and_score_verbosity( + capsys, train_score, scorer, verbose, split_prg, cdt_prg, expected +): + X, y = make_classification(n_samples=30, random_state=0) + clf = SVC(kernel="linear", random_state=0) + train, test = next(ShuffleSplit().split(X)) + + # test print without train score + fit_and_score_args = dict( + estimator=clf, + X=X, + y=y, + scorer=scorer, + train=train, + test=test, + verbose=verbose, + parameters=None, + fit_params=None, + score_params=None, + return_train_score=train_score, + split_progress=split_prg, + candidate_progress=cdt_prg, + ) + _fit_and_score(**fit_and_score_args) + out, _ = capsys.readouterr() + outlines = out.split("\n") + if len(outlines) > 2: + assert re.match(expected, outlines[1]) + else: + assert re.match(expected, outlines[0]) + + +def test_score(): + error_message = "scoring must return a number, got None" + + def two_params_scorer(estimator, X_test): + return None + + with pytest.raises(ValueError, match=error_message): + _score( + estimator=None, + X_test=None, + y_test=None, + scorer=two_params_scorer, + score_params=None, + error_score=np.nan, + ) + + +def test_callable_multimetric_confusion_matrix_cross_validate(): + def custom_scorer(clf, X, y): + y_pred = clf.predict(X) + cm = confusion_matrix(y, y_pred) + return {"tn": cm[0, 0], "fp": cm[0, 1], "fn": cm[1, 0], "tp": cm[1, 1]} + + X, y = make_classification(n_samples=40, n_features=4, random_state=42) + est = LinearSVC(dual="auto", random_state=42) + est.fit(X, y) + cv_results = cross_validate(est, X, y, cv=5, scoring=custom_scorer) + + score_names = ["tn", "fp", "fn", "tp"] + for name in score_names: + assert "test_{}".format(name) in cv_results + + +def test_learning_curve_partial_fit_regressors(): + """Check that regressors with partial_fit is supported. + + Non-regression test for #22981. 
+ """ + X, y = make_regression(random_state=42) + + # Does not error + learning_curve(MLPRegressor(), X, y, exploit_incremental_learning=True, cv=2) + + +def test_learning_curve_some_failing_fits_warning(global_random_seed): + """Checks for fit failures in `learning_curve` and raises the required warning""" + + X, y = make_classification( + n_samples=30, + n_classes=3, + n_informative=6, + shuffle=False, + random_state=global_random_seed, + ) + # sorting the target to trigger SVC error on the 2 first splits because a single + # class is present + sorted_idx = np.argsort(y) + X, y = X[sorted_idx], y[sorted_idx] + + svc = SVC() + warning_message = "10 fits failed out of a total of 25" + + with pytest.warns(FitFailedWarning, match=warning_message): + _, train_score, test_score, *_ = learning_curve( + svc, X, y, cv=5, error_score=np.nan + ) + + # the first 2 splits should lead to warnings and thus np.nan scores + for idx in range(2): + assert np.isnan(train_score[idx]).all() + assert np.isnan(test_score[idx]).all() + + for idx in range(2, train_score.shape[0]): + assert not np.isnan(train_score[idx]).any() + assert not np.isnan(test_score[idx]).any() + + +def test_cross_validate_return_indices(global_random_seed): + """Check the behaviour of `return_indices` in `cross_validate`.""" + X, y = load_iris(return_X_y=True) + X = scale(X) # scale features for better convergence + estimator = LogisticRegression() + + cv = KFold(n_splits=3, shuffle=True, random_state=global_random_seed) + cv_results = cross_validate(estimator, X, y, cv=cv, n_jobs=2, return_indices=False) + assert "indices" not in cv_results + + cv_results = cross_validate(estimator, X, y, cv=cv, n_jobs=2, return_indices=True) + assert "indices" in cv_results + train_indices = cv_results["indices"]["train"] + test_indices = cv_results["indices"]["test"] + assert len(train_indices) == cv.n_splits + assert len(test_indices) == cv.n_splits + + assert_array_equal([indices.size for indices in train_indices], 100) + assert_array_equal([indices.size for indices in test_indices], 50) + + for split_idx, (expected_train_idx, expected_test_idx) in enumerate(cv.split(X, y)): + assert_array_equal(train_indices[split_idx], expected_train_idx) + assert_array_equal(test_indices[split_idx], expected_test_idx) + + +# Tests for metadata routing in cross_val* +# ======================================== + + +# TODO(1.6): remove this test in 1.6 +def test_cross_validate_fit_param_deprecation(): + """Check that we warn about deprecating `fit_params`.""" + with pytest.warns(FutureWarning, match="`fit_params` is deprecated"): + cross_validate(estimator=ConsumingClassifier(), X=X, y=y, cv=2, fit_params={}) + + with pytest.raises( + ValueError, match="`params` and `fit_params` cannot both be provided" + ): + cross_validate( + estimator=ConsumingClassifier(), X=X, y=y, fit_params={}, params={} + ) + + +@pytest.mark.usefixtures("enable_slep006") +@pytest.mark.parametrize( + "cv_method", [cross_validate, cross_val_score, cross_val_predict] +) +def test_groups_with_routing_validation(cv_method): + """Check that we raise an error if `groups` are passed to the cv method instead + of `params` when metadata routing is enabled. 
+ """ + with pytest.raises(ValueError, match="`groups` can only be passed if"): + cv_method( + estimator=ConsumingClassifier(), + X=X, + y=y, + groups=[], + ) + + +@pytest.mark.usefixtures("enable_slep006") +@pytest.mark.parametrize( + "cv_method", [cross_validate, cross_val_score, cross_val_predict] +) +def test_passed_unrequested_metadata(cv_method): + """Check that we raise an error when passing metadata that is not + requested.""" + err_msg = re.escape("but are not explicitly set as requested or not requested") + with pytest.raises(ValueError, match=err_msg): + cv_method( + estimator=ConsumingClassifier(), + X=X, + y=y, + params=dict(metadata=[]), + ) + + +@pytest.mark.usefixtures("enable_slep006") +@pytest.mark.parametrize( + "cv_method", [cross_validate, cross_val_score, cross_val_predict] +) +def test_cross_validate_routing(cv_method): + """Check that the respective cv method is properly dispatching the metadata + to the consumer.""" + scorer_registry = _Registry() + scorer = ConsumingScorer(registry=scorer_registry).set_score_request( + sample_weight="score_weights", metadata="score_metadata" + ) + splitter_registry = _Registry() + splitter = ConsumingSplitter(registry=splitter_registry).set_split_request( + groups="split_groups", metadata="split_metadata" + ) + estimator_registry = _Registry() + estimator = ConsumingClassifier(registry=estimator_registry).set_fit_request( + sample_weight="fit_sample_weight", metadata="fit_metadata" + ) + n_samples = _num_samples(X) + rng = np.random.RandomState(0) + score_weights = rng.rand(n_samples) + score_metadata = rng.rand(n_samples) + split_groups = rng.randint(0, 3, n_samples) + split_metadata = rng.rand(n_samples) + fit_sample_weight = rng.rand(n_samples) + fit_metadata = rng.rand(n_samples) + + extra_params = { + cross_validate: dict(scoring=dict(my_scorer=scorer, accuracy="accuracy")), + # cross_val_score doesn't support multiple scorers + cross_val_score: dict(scoring=scorer), + # cross_val_predict doesn't need a scorer + cross_val_predict: dict(), + } + + params = dict( + split_groups=split_groups, + split_metadata=split_metadata, + fit_sample_weight=fit_sample_weight, + fit_metadata=fit_metadata, + ) + + if cv_method is not cross_val_predict: + params.update( + score_weights=score_weights, + score_metadata=score_metadata, + ) + + cv_method( + estimator, + X=X, + y=y, + cv=splitter, + **extra_params[cv_method], + params=params, + ) + + if cv_method is not cross_val_predict: + # cross_val_predict doesn't need a scorer + assert len(scorer_registry) + for _scorer in scorer_registry: + check_recorded_metadata( + obj=_scorer, + method="score", + split_params=("sample_weight", "metadata"), + sample_weight=score_weights, + metadata=score_metadata, + ) + + assert len(splitter_registry) + for _splitter in splitter_registry: + check_recorded_metadata( + obj=_splitter, + method="split", + groups=split_groups, + metadata=split_metadata, + ) + + assert len(estimator_registry) + for _estimator in estimator_registry: + check_recorded_metadata( + obj=_estimator, + method="fit", + split_params=("sample_weight", "metadata"), + sample_weight=fit_sample_weight, + metadata=fit_metadata, + ) + + +# End of metadata routing tests +# ============================= diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__init__.py b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d8cc67c5322e260ac281e0768a90842449a97519 --- /dev/null +++ 
b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/__init__.py @@ -0,0 +1,1299 @@ +""" +The :mod:`sklearn.utils` module includes various utilities. +""" + +import math +import numbers +import platform +import struct +import timeit +import warnings +from collections.abc import Sequence +from contextlib import contextmanager, suppress +from itertools import compress, islice + +import numpy as np +from scipy.sparse import issparse + +from .. import get_config +from ..exceptions import DataConversionWarning +from . import _joblib, metadata_routing +from ._bunch import Bunch +from ._estimator_html_repr import estimator_html_repr +from ._param_validation import Integral, Interval, validate_params +from .class_weight import compute_class_weight, compute_sample_weight +from .deprecation import deprecated +from .discovery import all_estimators +from .fixes import parse_version, threadpool_info +from .murmurhash import murmurhash3_32 +from .validation import ( + _is_arraylike_not_scalar, + _is_pandas_df, + _is_polars_df, + _use_interchange_protocol, + as_float_array, + assert_all_finite, + check_array, + check_consistent_length, + check_random_state, + check_scalar, + check_symmetric, + check_X_y, + column_or_1d, + indexable, +) + +# Do not deprecate parallel_backend and register_parallel_backend as they are +# needed to tune `scikit-learn` behavior and have different effect if called +# from the vendored version or or the site-package version. The other are +# utilities that are independent of scikit-learn so they are not part of +# scikit-learn public API. +parallel_backend = _joblib.parallel_backend +register_parallel_backend = _joblib.register_parallel_backend + +__all__ = [ + "murmurhash3_32", + "as_float_array", + "assert_all_finite", + "check_array", + "check_random_state", + "compute_class_weight", + "compute_sample_weight", + "column_or_1d", + "check_consistent_length", + "check_X_y", + "check_scalar", + "indexable", + "check_symmetric", + "indices_to_mask", + "deprecated", + "parallel_backend", + "register_parallel_backend", + "resample", + "shuffle", + "check_matplotlib_support", + "all_estimators", + "DataConversionWarning", + "estimator_html_repr", + "Bunch", + "metadata_routing", +] + +IS_PYPY = platform.python_implementation() == "PyPy" +_IS_32BIT = 8 * struct.calcsize("P") == 32 +_IS_WASM = platform.machine() in ["wasm32", "wasm64"] + + +def _in_unstable_openblas_configuration(): + """Return True if in an unstable configuration for OpenBLAS""" + + # Import libraries which might load OpenBLAS. + import numpy # noqa + import scipy # noqa + + modules_info = threadpool_info() + + open_blas_used = any(info["internal_api"] == "openblas" for info in modules_info) + if not open_blas_used: + return False + + # OpenBLAS 0.3.16 fixed instability for arm64, see: + # https://github.com/xianyi/OpenBLAS/blob/1b6db3dbba672b4f8af935bd43a1ff6cff4d20b7/Changelog.txt#L56-L58 # noqa + openblas_arm64_stable_version = parse_version("0.3.16") + for info in modules_info: + if info["internal_api"] != "openblas": + continue + openblas_version = info.get("version") + openblas_architecture = info.get("architecture") + if openblas_version is None or openblas_architecture is None: + # Cannot be sure that OpenBLAS is good enough. 
Assume unstable: + return True + if ( + openblas_architecture == "neoversen1" + and parse_version(openblas_version) < openblas_arm64_stable_version + ): + # See discussions in https://github.com/numpy/numpy/issues/19411 + return True + return False + + +@validate_params( + { + "X": ["array-like", "sparse matrix"], + "mask": ["array-like"], + }, + prefer_skip_nested_validation=True, +) +def safe_mask(X, mask): + """Return a mask which is safe to use on X. + + Parameters + ---------- + X : {array-like, sparse matrix} + Data on which to apply mask. + + mask : array-like + Mask to be used on X. + + Returns + ------- + mask : ndarray + Array that is safe to use on X. + + Examples + -------- + >>> from sklearn.utils import safe_mask + >>> from scipy.sparse import csr_matrix + >>> data = csr_matrix([[1], [2], [3], [4], [5]]) + >>> condition = [False, True, True, False, True] + >>> mask = safe_mask(data, condition) + >>> data[mask].toarray() + array([[2], + [3], + [5]]) + """ + mask = np.asarray(mask) + if np.issubdtype(mask.dtype, np.signedinteger): + return mask + + if hasattr(X, "toarray"): + ind = np.arange(mask.shape[0]) + mask = ind[mask] + return mask + + +def axis0_safe_slice(X, mask, len_mask): + """Return a mask which is safer to use on X than safe_mask. + + This mask is safer than safe_mask since it returns an + empty array, when a sparse matrix is sliced with a boolean mask + with all False, instead of raising an unhelpful error in older + versions of SciPy. + + See: https://github.com/scipy/scipy/issues/5361 + + Also note that we can avoid doing the dot product by checking if + the len_mask is not zero in _huber_loss_and_gradient but this + is not going to be the bottleneck, since the number of outliers + and non_outliers are typically non-zero and it makes the code + tougher to follow. + + Parameters + ---------- + X : {array-like, sparse matrix} + Data on which to apply mask. + + mask : ndarray + Mask to be used on X. + + len_mask : int + The length of the mask. + + Returns + ------- + mask : ndarray + Array that is safe to use on X. + """ + if len_mask != 0: + return X[safe_mask(X, mask), :] + return np.zeros(shape=(0, X.shape[1])) + + +def _array_indexing(array, key, key_dtype, axis): + """Index an array or scipy.sparse consistently across NumPy version.""" + if issparse(array) and key_dtype == "bool": + key = np.asarray(key) + if isinstance(key, tuple): + key = list(key) + return array[key, ...] 
if axis == 0 else array[:, key] + + +def _pandas_indexing(X, key, key_dtype, axis): + """Index a pandas dataframe or a series.""" + if _is_arraylike_not_scalar(key): + key = np.asarray(key) + + if key_dtype == "int" and not (isinstance(key, slice) or np.isscalar(key)): + # using take() instead of iloc[] ensures the return value is a "proper" + # copy that will not raise SettingWithCopyWarning + return X.take(key, axis=axis) + else: + # check whether we should index with loc or iloc + indexer = X.iloc if key_dtype == "int" else X.loc + return indexer[:, key] if axis else indexer[key] + + +def _list_indexing(X, key, key_dtype): + """Index a Python list.""" + if np.isscalar(key) or isinstance(key, slice): + # key is a slice or a scalar + return X[key] + if key_dtype == "bool": + # key is a boolean array-like + return list(compress(X, key)) + # key is a integer array-like of key + return [X[idx] for idx in key] + + +def _polars_indexing(X, key, key_dtype, axis): + """Indexing X with polars interchange protocol.""" + # Polars behavior is more consistent with lists + if isinstance(key, np.ndarray): + key = key.tolist() + + if axis == 1: + return X[:, key] + else: + return X[key] + + +def _determine_key_type(key, accept_slice=True): + """Determine the data type of key. + + Parameters + ---------- + key : scalar, slice or array-like + The key from which we want to infer the data type. + + accept_slice : bool, default=True + Whether or not to raise an error if the key is a slice. + + Returns + ------- + dtype : {'int', 'str', 'bool', None} + Returns the data type of key. + """ + err_msg = ( + "No valid specification of the columns. Only a scalar, list or " + "slice of all integers or all strings, or boolean mask is " + "allowed" + ) + + dtype_to_str = {int: "int", str: "str", bool: "bool", np.bool_: "bool"} + array_dtype_to_str = { + "i": "int", + "u": "int", + "b": "bool", + "O": "str", + "U": "str", + "S": "str", + } + + if key is None: + return None + if isinstance(key, tuple(dtype_to_str.keys())): + try: + return dtype_to_str[type(key)] + except KeyError: + raise ValueError(err_msg) + if isinstance(key, slice): + if not accept_slice: + raise TypeError( + "Only array-like or scalar are supported. A Python slice was given." + ) + if key.start is None and key.stop is None: + return None + key_start_type = _determine_key_type(key.start) + key_stop_type = _determine_key_type(key.stop) + if key_start_type is not None and key_stop_type is not None: + if key_start_type != key_stop_type: + raise ValueError(err_msg) + if key_start_type is not None: + return key_start_type + return key_stop_type + if isinstance(key, (list, tuple)): + unique_key = set(key) + key_type = {_determine_key_type(elt) for elt in unique_key} + if not key_type: + return None + if len(key_type) != 1: + raise ValueError(err_msg) + return key_type.pop() + if hasattr(key, "dtype"): + try: + return array_dtype_to_str[key.dtype.kind] + except KeyError: + raise ValueError(err_msg) + raise ValueError(err_msg) + + +def _safe_indexing(X, indices, *, axis=0): + """Return rows, items or columns of X using indices. + + .. warning:: + + This utility is documented, but **private**. This means that + backward compatibility might be broken without any deprecation + cycle. + + Parameters + ---------- + X : array-like, sparse-matrix, list, pandas.DataFrame, pandas.Series + Data from which to sample rows, items or columns. `list` are only + supported when `axis=0`. 
+ indices : bool, int, str, slice, array-like + - If `axis=0`, boolean and integer array-like, integer slice, + and scalar integer are supported. + - If `axis=1`: + - to select a single column, `indices` can be of `int` type for + all `X` types and `str` only for dataframe. The selected subset + will be 1D, unless `X` is a sparse matrix in which case it will + be 2D. + - to select multiples columns, `indices` can be one of the + following: `list`, `array`, `slice`. The type used in + these containers can be one of the following: `int`, 'bool' and + `str`. However, `str` is only supported when `X` is a dataframe. + The selected subset will be 2D. + axis : int, default=0 + The axis along which `X` will be subsampled. `axis=0` will select + rows while `axis=1` will select columns. + + Returns + ------- + subset + Subset of X on axis 0 or 1. + + Notes + ----- + CSR, CSC, and LIL sparse matrices are supported. COO sparse matrices are + not supported. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.utils import _safe_indexing + >>> data = np.array([[1, 2], [3, 4], [5, 6]]) + >>> _safe_indexing(data, 0, axis=0) # select the first row + array([1, 2]) + >>> _safe_indexing(data, 0, axis=1) # select the first column + array([1, 3, 5]) + """ + if indices is None: + return X + + if axis not in (0, 1): + raise ValueError( + "'axis' should be either 0 (to index rows) or 1 (to index " + " column). Got {} instead.".format(axis) + ) + + indices_dtype = _determine_key_type(indices) + + if axis == 0 and indices_dtype == "str": + raise ValueError("String indexing is not supported with 'axis=0'") + + if axis == 1 and isinstance(X, list): + raise ValueError("axis=1 is not supported for lists") + + if axis == 1 and hasattr(X, "ndim") and X.ndim != 2: + raise ValueError( + "'X' should be a 2D NumPy array, 2D sparse matrix or pandas " + "dataframe when indexing the columns (i.e. 'axis=1'). " + "Got {} instead with {} dimension(s).".format(type(X), X.ndim) + ) + + if ( + axis == 1 + and indices_dtype == "str" + and not (_is_pandas_df(X) or _use_interchange_protocol(X)) + ): + raise ValueError( + "Specifying the columns using strings is only supported for dataframes." + ) + + if hasattr(X, "iloc"): + # TODO: we should probably use _is_pandas_df(X) instead but this would + # require updating some tests such as test_train_test_split_mock_pandas. + return _pandas_indexing(X, indices, indices_dtype, axis=axis) + elif _is_polars_df(X): + return _polars_indexing(X, indices, indices_dtype, axis=axis) + elif hasattr(X, "shape"): + return _array_indexing(X, indices, indices_dtype, axis=axis) + else: + return _list_indexing(X, indices, indices_dtype) + + +def _safe_assign(X, values, *, row_indexer=None, column_indexer=None): + """Safe assignment to a numpy array, sparse matrix, or pandas dataframe. + + Parameters + ---------- + X : {ndarray, sparse-matrix, dataframe} + Array to be modified. It is expected to be 2-dimensional. + + values : ndarray + The values to be assigned to `X`. + + row_indexer : array-like, dtype={int, bool}, default=None + A 1-dimensional array to select the rows of interest. If `None`, all + rows are selected. + + column_indexer : array-like, dtype={int, bool}, default=None + A 1-dimensional array to select the columns of interest. If `None`, all + columns are selected. 
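Since `_safe_assign` carries no usage example of its own, here is a small sketch of how it behaves on a NumPy array (a private helper; the import path follows this module, and the shapes and values are illustrative):

import numpy as np
from sklearn.utils import _safe_assign

X = np.zeros((4, 3))

# assign a (2, 3) block to rows 1 and 3, all columns (column_indexer=None)
_safe_assign(X, np.full((2, 3), 7.0), row_indexer=[1, 3])

# assign two scalars to the (row, column) pairs (0, 1) and (2, 1)
_safe_assign(X, np.array([1.0, 2.0]), row_indexer=[0, 2], column_indexer=[1])
print(X)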
+ """ + row_indexer = slice(None, None, None) if row_indexer is None else row_indexer + column_indexer = ( + slice(None, None, None) if column_indexer is None else column_indexer + ) + + if hasattr(X, "iloc"): # pandas dataframe + with warnings.catch_warnings(): + # pandas >= 1.5 raises a warning when using iloc to set values in a column + # that does not have the same type as the column being set. It happens + # for instance when setting a categorical column with a string. + # In the future the behavior won't change and the warning should disappear. + # TODO(1.3): check if the warning is still raised or remove the filter. + warnings.simplefilter("ignore", FutureWarning) + X.iloc[row_indexer, column_indexer] = values + else: # numpy array or sparse matrix + X[row_indexer, column_indexer] = values + + +def _get_column_indices_for_bool_or_int(key, n_columns): + # Convert key into list of positive integer indexes + try: + idx = _safe_indexing(np.arange(n_columns), key) + except IndexError as e: + raise ValueError( + f"all features must be in [0, {n_columns - 1}] or [-{n_columns}, 0]" + ) from e + return np.atleast_1d(idx).tolist() + + +def _get_column_indices(X, key): + """Get feature column indices for input data X and key. + + For accepted values of `key`, see the docstring of + :func:`_safe_indexing`. + """ + key_dtype = _determine_key_type(key) + if _use_interchange_protocol(X): + return _get_column_indices_interchange(X.__dataframe__(), key, key_dtype) + + n_columns = X.shape[1] + if isinstance(key, (list, tuple)) and not key: + # we get an empty list + return [] + elif key_dtype in ("bool", "int"): + return _get_column_indices_for_bool_or_int(key, n_columns) + else: + try: + all_columns = X.columns + except AttributeError: + raise ValueError( + "Specifying the columns using strings is only supported for dataframes." 
+ ) + if isinstance(key, str): + columns = [key] + elif isinstance(key, slice): + start, stop = key.start, key.stop + if start is not None: + start = all_columns.get_loc(start) + if stop is not None: + # pandas indexing with strings is endpoint included + stop = all_columns.get_loc(stop) + 1 + else: + stop = n_columns + 1 + return list(islice(range(n_columns), start, stop)) + else: + columns = list(key) + + try: + column_indices = [] + for col in columns: + col_idx = all_columns.get_loc(col) + if not isinstance(col_idx, numbers.Integral): + raise ValueError( + f"Selected columns, {columns}, are not unique in dataframe" + ) + column_indices.append(col_idx) + + except KeyError as e: + raise ValueError("A given column is not a column of the dataframe") from e + + return column_indices + + +def _get_column_indices_interchange(X_interchange, key, key_dtype): + """Same as _get_column_indices but for X with __dataframe__ protocol.""" + + n_columns = X_interchange.num_columns() + + if isinstance(key, (list, tuple)) and not key: + # we get an empty list + return [] + elif key_dtype in ("bool", "int"): + return _get_column_indices_for_bool_or_int(key, n_columns) + else: + column_names = list(X_interchange.column_names()) + + if isinstance(key, slice): + if key.step not in [1, None]: + raise NotImplementedError("key.step must be 1 or None") + start, stop = key.start, key.stop + if start is not None: + start = column_names.index(start) + + if stop is not None: + stop = column_names.index(stop) + 1 + else: + stop = n_columns + 1 + return list(islice(range(n_columns), start, stop)) + + selected_columns = [key] if np.isscalar(key) else key + + try: + return [column_names.index(col) for col in selected_columns] + except ValueError as e: + raise ValueError("A given column is not a column of the dataframe") from e + + +@validate_params( + { + "replace": ["boolean"], + "n_samples": [Interval(numbers.Integral, 1, None, closed="left"), None], + "random_state": ["random_state"], + "stratify": ["array-like", None], + }, + prefer_skip_nested_validation=True, +) +def resample(*arrays, replace=True, n_samples=None, random_state=None, stratify=None): + """Resample arrays or sparse matrices in a consistent way. + + The default strategy implements one step of the bootstrapping + procedure. + + Parameters + ---------- + *arrays : sequence of array-like of shape (n_samples,) or \ + (n_samples, n_outputs) + Indexable data-structures can be arrays, lists, dataframes or scipy + sparse matrices with consistent first dimension. + + replace : bool, default=True + Implements resampling with replacement. If False, this will implement + (sliced) random permutations. + + n_samples : int, default=None + Number of samples to generate. If left to None this is + automatically set to the first dimension of the arrays. + If replace is False it should not be larger than the length of + arrays. + + random_state : int, RandomState instance or None, default=None + Determines random number generation for shuffling + the data. + Pass an int for reproducible results across multiple function calls. + See :term:`Glossary `. + + stratify : array-like of shape (n_samples,) or (n_samples, n_outputs), \ + default=None + If not None, data is split in a stratified fashion, using this as + the class labels. + + Returns + ------- + resampled_arrays : sequence of array-like of shape (n_samples,) or \ + (n_samples, n_outputs) + Sequence of resampled copies of the collections. The original arrays + are not impacted. 
+ + See Also + -------- + shuffle : Shuffle arrays or sparse matrices in a consistent way. + + Examples + -------- + It is possible to mix sparse and dense arrays in the same run:: + + >>> import numpy as np + >>> X = np.array([[1., 0.], [2., 1.], [0., 0.]]) + >>> y = np.array([0, 1, 2]) + + >>> from scipy.sparse import coo_matrix + >>> X_sparse = coo_matrix(X) + + >>> from sklearn.utils import resample + >>> X, X_sparse, y = resample(X, X_sparse, y, random_state=0) + >>> X + array([[1., 0.], + [2., 1.], + [1., 0.]]) + + >>> X_sparse + <3x2 sparse matrix of type '<... 'numpy.float64'>' + with 4 stored elements in Compressed Sparse Row format> + + >>> X_sparse.toarray() + array([[1., 0.], + [2., 1.], + [1., 0.]]) + + >>> y + array([0, 1, 0]) + + >>> resample(y, n_samples=2, random_state=0) + array([0, 1]) + + Example using stratification:: + + >>> y = [0, 0, 1, 1, 1, 1, 1, 1, 1] + >>> resample(y, n_samples=5, replace=False, stratify=y, + ... random_state=0) + [1, 1, 1, 0, 1] + """ + max_n_samples = n_samples + random_state = check_random_state(random_state) + + if len(arrays) == 0: + return None + + first = arrays[0] + n_samples = first.shape[0] if hasattr(first, "shape") else len(first) + + if max_n_samples is None: + max_n_samples = n_samples + elif (max_n_samples > n_samples) and (not replace): + raise ValueError( + "Cannot sample %d out of arrays with dim %d when replace is False" + % (max_n_samples, n_samples) + ) + + check_consistent_length(*arrays) + + if stratify is None: + if replace: + indices = random_state.randint(0, n_samples, size=(max_n_samples,)) + else: + indices = np.arange(n_samples) + random_state.shuffle(indices) + indices = indices[:max_n_samples] + else: + # Code adapted from StratifiedShuffleSplit() + y = check_array(stratify, ensure_2d=False, dtype=None) + if y.ndim == 2: + # for multi-label y, map each distinct row to a string repr + # using join because str(row) uses an ellipsis if len(row) > 1000 + y = np.array([" ".join(row.astype("str")) for row in y]) + + classes, y_indices = np.unique(y, return_inverse=True) + n_classes = classes.shape[0] + + class_counts = np.bincount(y_indices) + + # Find the sorted list of instances for each class: + # (np.unique above performs a sort, so code is O(n logn) already) + class_indices = np.split( + np.argsort(y_indices, kind="mergesort"), np.cumsum(class_counts)[:-1] + ) + + n_i = _approximate_mode(class_counts, max_n_samples, random_state) + + indices = [] + + for i in range(n_classes): + indices_i = random_state.choice(class_indices[i], n_i[i], replace=replace) + indices.extend(indices_i) + + indices = random_state.permutation(indices) + + # convert sparse matrices to CSR for row-based indexing + arrays = [a.tocsr() if issparse(a) else a for a in arrays] + resampled_arrays = [_safe_indexing(a, indices) for a in arrays] + if len(resampled_arrays) == 1: + # syntactic sugar for the unit argument case + return resampled_arrays[0] + else: + return resampled_arrays + + +def shuffle(*arrays, random_state=None, n_samples=None): + """Shuffle arrays or sparse matrices in a consistent way. + + This is a convenience alias to ``resample(*arrays, replace=False)`` to do + random permutations of the collections. + + Parameters + ---------- + *arrays : sequence of indexable data-structures + Indexable data-structures can be arrays, lists, dataframes or scipy + sparse matrices with consistent first dimension. + + random_state : int, RandomState instance or None, default=None + Determines random number generation for shuffling + the data. 
+ Pass an int for reproducible results across multiple function calls. + See :term:`Glossary `. + + n_samples : int, default=None + Number of samples to generate. If left to None this is + automatically set to the first dimension of the arrays. It should + not be larger than the length of arrays. + + Returns + ------- + shuffled_arrays : sequence of indexable data-structures + Sequence of shuffled copies of the collections. The original arrays + are not impacted. + + See Also + -------- + resample : Resample arrays or sparse matrices in a consistent way. + + Examples + -------- + It is possible to mix sparse and dense arrays in the same run:: + + >>> import numpy as np + >>> X = np.array([[1., 0.], [2., 1.], [0., 0.]]) + >>> y = np.array([0, 1, 2]) + + >>> from scipy.sparse import coo_matrix + >>> X_sparse = coo_matrix(X) + + >>> from sklearn.utils import shuffle + >>> X, X_sparse, y = shuffle(X, X_sparse, y, random_state=0) + >>> X + array([[0., 0.], + [2., 1.], + [1., 0.]]) + + >>> X_sparse + <3x2 sparse matrix of type '<... 'numpy.float64'>' + with 3 stored elements in Compressed Sparse Row format> + + >>> X_sparse.toarray() + array([[0., 0.], + [2., 1.], + [1., 0.]]) + + >>> y + array([2, 1, 0]) + + >>> shuffle(y, n_samples=2, random_state=0) + array([0, 1]) + """ + return resample( + *arrays, replace=False, n_samples=n_samples, random_state=random_state + ) + + +def safe_sqr(X, *, copy=True): + """Element wise squaring of array-likes and sparse matrices. + + Parameters + ---------- + X : {array-like, ndarray, sparse matrix} + + copy : bool, default=True + Whether to create a copy of X and operate on it or to perform + inplace computation (default behaviour). + + Returns + ------- + X ** 2 : element wise square + Return the element-wise square of the input. + + Examples + -------- + >>> from sklearn.utils import safe_sqr + >>> safe_sqr([1, 2, 3]) + array([1, 4, 9]) + """ + X = check_array(X, accept_sparse=["csr", "csc", "coo"], ensure_2d=False) + if issparse(X): + if copy: + X = X.copy() + X.data **= 2 + else: + if copy: + X = X**2 + else: + X **= 2 + return X + + +def _chunk_generator(gen, chunksize): + """Chunk generator, ``gen`` into lists of length ``chunksize``. The last + chunk may have a length less than ``chunksize``.""" + while True: + chunk = list(islice(gen, chunksize)) + if chunk: + yield chunk + else: + return + + +@validate_params( + { + "n": [Interval(numbers.Integral, 1, None, closed="left")], + "batch_size": [Interval(numbers.Integral, 1, None, closed="left")], + "min_batch_size": [Interval(numbers.Integral, 0, None, closed="left")], + }, + prefer_skip_nested_validation=True, +) +def gen_batches(n, batch_size, *, min_batch_size=0): + """Generator to create slices containing `batch_size` elements from 0 to `n`. + + The last slice may contain less than `batch_size` elements, when + `batch_size` does not divide `n`. + + Parameters + ---------- + n : int + Size of the sequence. + batch_size : int + Number of elements in each batch. + min_batch_size : int, default=0 + Minimum number of elements in each batch. + + Yields + ------ + slice of `batch_size` elements + + See Also + -------- + gen_even_slices: Generator to create n_packs slices going up to n. 
+ + Examples + -------- + >>> from sklearn.utils import gen_batches + >>> list(gen_batches(7, 3)) + [slice(0, 3, None), slice(3, 6, None), slice(6, 7, None)] + >>> list(gen_batches(6, 3)) + [slice(0, 3, None), slice(3, 6, None)] + >>> list(gen_batches(2, 3)) + [slice(0, 2, None)] + >>> list(gen_batches(7, 3, min_batch_size=0)) + [slice(0, 3, None), slice(3, 6, None), slice(6, 7, None)] + >>> list(gen_batches(7, 3, min_batch_size=2)) + [slice(0, 3, None), slice(3, 7, None)] + """ + start = 0 + for _ in range(int(n // batch_size)): + end = start + batch_size + if end + min_batch_size > n: + continue + yield slice(start, end) + start = end + if start < n: + yield slice(start, n) + + +@validate_params( + { + "n": [Interval(Integral, 1, None, closed="left")], + "n_packs": [Interval(Integral, 1, None, closed="left")], + "n_samples": [Interval(Integral, 1, None, closed="left"), None], + }, + prefer_skip_nested_validation=True, +) +def gen_even_slices(n, n_packs, *, n_samples=None): + """Generator to create `n_packs` evenly spaced slices going up to `n`. + + If `n_packs` does not divide `n`, except for the first `n % n_packs` + slices, remaining slices may contain fewer elements. + + Parameters + ---------- + n : int + Size of the sequence. + n_packs : int + Number of slices to generate. + n_samples : int, default=None + Number of samples. Pass `n_samples` when the slices are to be used for + sparse matrix indexing; slicing off-the-end raises an exception, while + it works for NumPy arrays. + + Yields + ------ + `slice` representing a set of indices from 0 to n. + + See Also + -------- + gen_batches: Generator to create slices containing batch_size elements + from 0 to n. + + Examples + -------- + >>> from sklearn.utils import gen_even_slices + >>> list(gen_even_slices(10, 1)) + [slice(0, 10, None)] + >>> list(gen_even_slices(10, 10)) + [slice(0, 1, None), slice(1, 2, None), ..., slice(9, 10, None)] + >>> list(gen_even_slices(10, 5)) + [slice(0, 2, None), slice(2, 4, None), ..., slice(8, 10, None)] + >>> list(gen_even_slices(10, 3)) + [slice(0, 4, None), slice(4, 7, None), slice(7, 10, None)] + """ + start = 0 + for pack_num in range(n_packs): + this_n = n // n_packs + if pack_num < n % n_packs: + this_n += 1 + if this_n > 0: + end = start + this_n + if n_samples is not None: + end = min(n_samples, end) + yield slice(start, end, None) + start = end + + +def tosequence(x): + """Cast iterable x to a Sequence, avoiding a copy if possible. + + Parameters + ---------- + x : iterable + The iterable to be converted. + + Returns + ------- + x : Sequence + If `x` is a NumPy array, it returns it as a `ndarray`. If `x` + is a `Sequence`, `x` is returned as-is. If `x` is from any other + type, `x` is returned casted as a list. + """ + if isinstance(x, np.ndarray): + return np.asarray(x) + elif isinstance(x, Sequence): + return x + else: + return list(x) + + +def _to_object_array(sequence): + """Convert sequence to a 1-D NumPy array of object dtype. + + numpy.array constructor has a similar use but it's output + is ambiguous. It can be 1-D NumPy array of object dtype if + the input is a ragged array, but if the input is a list of + equal length arrays, then the output is a 2D numpy.array. + _to_object_array solves this ambiguity by guarantying that + the output is a 1-D NumPy array of objects for any input. + + Parameters + ---------- + sequence : array-like of shape (n_elements,) + The sequence to be converted. 
+ + Returns + ------- + out : ndarray of shape (n_elements,), dtype=object + The converted sequence into a 1-D NumPy array of object dtype. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.utils import _to_object_array + >>> _to_object_array([np.array([0]), np.array([1])]) + array([array([0]), array([1])], dtype=object) + >>> _to_object_array([np.array([0]), np.array([1, 2])]) + array([array([0]), array([1, 2])], dtype=object) + >>> _to_object_array([np.array([0]), np.array([1, 2])]) + array([array([0]), array([1, 2])], dtype=object) + """ + out = np.empty(len(sequence), dtype=object) + out[:] = sequence + return out + + +def indices_to_mask(indices, mask_length): + """Convert list of indices to boolean mask. + + Parameters + ---------- + indices : list-like + List of integers treated as indices. + mask_length : int + Length of boolean mask to be generated. + This parameter must be greater than max(indices). + + Returns + ------- + mask : 1d boolean nd-array + Boolean array that is True where indices are present, else False. + + Examples + -------- + >>> from sklearn.utils import indices_to_mask + >>> indices = [1, 2 , 3, 4] + >>> indices_to_mask(indices, 5) + array([False, True, True, True, True]) + """ + if mask_length <= np.max(indices): + raise ValueError("mask_length must be greater than max(indices)") + + mask = np.zeros(mask_length, dtype=bool) + mask[indices] = True + + return mask + + +def _message_with_time(source, message, time): + """Create one line message for logging purposes. + + Parameters + ---------- + source : str + String indicating the source or the reference of the message. + + message : str + Short message. + + time : int + Time in seconds. + """ + start_message = "[%s] " % source + + # adapted from joblib.logger.short_format_time without the Windows -.1s + # adjustment + if time > 60: + time_str = "%4.1fmin" % (time / 60) + else: + time_str = " %5.1fs" % time + end_message = " %s, total=%s" % (message, time_str) + dots_len = 70 - len(start_message) - len(end_message) + return "%s%s%s" % (start_message, dots_len * ".", end_message) + + +@contextmanager +def _print_elapsed_time(source, message=None): + """Log elapsed time to stdout when the context is exited. + + Parameters + ---------- + source : str + String indicating the source or the reference of the message. + + message : str, default=None + Short message. If None, nothing will be printed. + + Returns + ------- + context_manager + Prints elapsed time upon exit if verbose. + """ + if message is None: + yield + else: + start = timeit.default_timer() + yield + print(_message_with_time(source, message, timeit.default_timer() - start)) + + +def get_chunk_n_rows(row_bytes, *, max_n_rows=None, working_memory=None): + """Calculate how many rows can be processed within `working_memory`. + + Parameters + ---------- + row_bytes : int + The expected number of bytes of memory that will be consumed + during the processing of each row. + max_n_rows : int, default=None + The maximum return value. + working_memory : int or float, default=None + The number of rows to fit inside this number of MiB will be + returned. When None (default), the value of + ``sklearn.get_config()['working_memory']`` is used. + + Returns + ------- + int + The number of rows which can be processed within `working_memory`. + + Warns + ----- + Issues a UserWarning if `row_bytes exceeds `working_memory` MiB. 
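As a usage note for the two chunking helpers defined in this module, a small sketch of bounding the memory used by a blockwise computation (the 1 MiB budget and the Gram-matrix example are arbitrary illustrations, not taken from this diff):

import numpy as np
from sklearn.utils import gen_batches, get_chunk_n_rows

X = np.random.RandomState(0).rand(10_000, 50)

# each output row of the block computation below needs n_samples * 8 bytes
row_bytes = X.shape[0] * 8
chunk_n_rows = get_chunk_n_rows(row_bytes, max_n_rows=X.shape[0], working_memory=1)

for batch in gen_batches(X.shape[0], chunk_n_rows):
    gram_block = X[batch] @ X.T  # at most ~1 MiB per block
    # ... consume `gram_block` here ...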
+ """ + + if working_memory is None: + working_memory = get_config()["working_memory"] + + chunk_n_rows = int(working_memory * (2**20) // row_bytes) + if max_n_rows is not None: + chunk_n_rows = min(chunk_n_rows, max_n_rows) + if chunk_n_rows < 1: + warnings.warn( + "Could not adhere to working_memory config. " + "Currently %.0fMiB, %.0fMiB required." + % (working_memory, np.ceil(row_bytes * 2**-20)) + ) + chunk_n_rows = 1 + return chunk_n_rows + + +def _is_pandas_na(x): + """Test if x is pandas.NA. + + We intentionally do not use this function to return `True` for `pd.NA` in + `is_scalar_nan`, because estimators that support `pd.NA` are the exception + rather than the rule at the moment. When `pd.NA` is more universally + supported, we may reconsider this decision. + + Parameters + ---------- + x : any type + + Returns + ------- + boolean + """ + with suppress(ImportError): + from pandas import NA + + return x is NA + + return False + + +def is_scalar_nan(x): + """Test if x is NaN. + + This function is meant to overcome the issue that np.isnan does not allow + non-numerical types as input, and that np.nan is not float('nan'). + + Parameters + ---------- + x : any type + Any scalar value. + + Returns + ------- + bool + Returns true if x is NaN, and false otherwise. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.utils import is_scalar_nan + >>> is_scalar_nan(np.nan) + True + >>> is_scalar_nan(float("nan")) + True + >>> is_scalar_nan(None) + False + >>> is_scalar_nan("") + False + >>> is_scalar_nan([np.nan]) + False + """ + return ( + not isinstance(x, numbers.Integral) + and isinstance(x, numbers.Real) + and math.isnan(x) + ) + + +def _approximate_mode(class_counts, n_draws, rng): + """Computes approximate mode of multivariate hypergeometric. + + This is an approximation to the mode of the multivariate + hypergeometric given by class_counts and n_draws. + It shouldn't be off by more than one. + + It is the mostly likely outcome of drawing n_draws many + samples from the population given by class_counts. + + Parameters + ---------- + class_counts : ndarray of int + Population per class. + n_draws : int + Number of draws (samples to draw) from the overall population. + rng : random state + Used to break ties. + + Returns + ------- + sampled_classes : ndarray of int + Number of samples drawn from each class. + np.sum(sampled_classes) == n_draws + + Examples + -------- + >>> import numpy as np + >>> from sklearn.utils import _approximate_mode + >>> _approximate_mode(class_counts=np.array([4, 2]), n_draws=3, rng=0) + array([2, 1]) + >>> _approximate_mode(class_counts=np.array([5, 2]), n_draws=4, rng=0) + array([3, 1]) + >>> _approximate_mode(class_counts=np.array([2, 2, 2, 1]), + ... n_draws=2, rng=0) + array([0, 1, 1, 0]) + >>> _approximate_mode(class_counts=np.array([2, 2, 2, 1]), + ... 
n_draws=2, rng=42) + array([1, 1, 0, 0]) + """ + rng = check_random_state(rng) + # this computes a bad approximation to the mode of the + # multivariate hypergeometric given by class_counts and n_draws + continuous = class_counts / class_counts.sum() * n_draws + # floored means we don't overshoot n_samples, but probably undershoot + floored = np.floor(continuous) + # we add samples according to how much "left over" probability + # they had, until we arrive at n_samples + need_to_add = int(n_draws - floored.sum()) + if need_to_add > 0: + remainder = continuous - floored + values = np.sort(np.unique(remainder))[::-1] + # add according to remainder, but break ties + # randomly to avoid biases + for value in values: + (inds,) = np.where(remainder == value) + # if we need_to_add less than what's in inds + # we draw randomly from them. + # if we need to add more, we add them all and + # go to the next value + add_now = min(len(inds), need_to_add) + inds = rng.choice(inds, size=add_now, replace=False) + floored[inds] += 1 + need_to_add -= add_now + if need_to_add == 0: + break + return floored.astype(int) + + +def check_matplotlib_support(caller_name): + """Raise ImportError with detailed error message if mpl is not installed. + + Plot utilities like any of the Display's plotting functions should lazily import + matplotlib and call this helper before any computation. + + Parameters + ---------- + caller_name : str + The name of the caller that requires matplotlib. + """ + try: + import matplotlib # noqa + except ImportError as e: + raise ImportError( + "{} requires matplotlib. You can install matplotlib with " + "`pip install matplotlib`".format(caller_name) + ) from e + + +def check_pandas_support(caller_name): + """Raise ImportError with detailed error message if pandas is not installed. + + Plot utilities like :func:`fetch_openml` should lazily import + pandas and call this helper before any computation. + + Parameters + ---------- + caller_name : str + The name of the caller that requires pandas. + + Returns + ------- + pandas + The pandas package. + """ + try: + import pandas # noqa + + return pandas + except ImportError as e: + raise ImportError("{} requires pandas.".format(caller_name)) from e diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_arpack.py b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_arpack.py new file mode 100644 index 0000000000000000000000000000000000000000..3465ac98c2e81a2fb1ad0c971e6494867d2cec1b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_arpack.py @@ -0,0 +1,30 @@ +from .validation import check_random_state + + +def _init_arpack_v0(size, random_state): + """Initialize the starting vector for iteration in ARPACK functions. + + Initialize a ndarray with values sampled from the uniform distribution on + [-1, 1]. This initialization model has been chosen to be consistent with + the ARPACK one as another initialization can lead to convergence issues. + + Parameters + ---------- + size : int + The size of the eigenvalue vector to be initialized. + + random_state : int, RandomState instance or None, default=None + The seed of the pseudo random number generator used to generate a + uniform distribution. If int, random_state is the seed used by the + random number generator; If RandomState instance, random_state is the + random number generator; If None, the random number generator is the + RandomState instance used by `np.random`. + + Returns + ------- + v0 : ndarray of shape (size,) + The initialized vector. 
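`_init_arpack_v0` only returns a starting vector, so a short sketch of how it is meant to be combined with an ARPACK-based solver may help (the symmetric test matrix and `k=3` are arbitrary; the helper is private and imported from the path introduced above):

import numpy as np
from scipy.sparse.linalg import eigsh
from sklearn.utils._arpack import _init_arpack_v0

rng = np.random.RandomState(0)
A = rng.rand(100, 100)
A = A @ A.T  # symmetric matrix, so eigsh applies

# seeding v0 makes repeated eigsh calls reproducible
v0 = _init_arpack_v0(size=A.shape[0], random_state=0)
eigenvalues, eigenvectors = eigsh(A, k=3, v0=v0)
print(eigenvalues)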
+ """ + random_state = check_random_state(random_state) + v0 = random_state.uniform(-1, 1, size) + return v0 diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_array_api.py b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_array_api.py new file mode 100644 index 0000000000000000000000000000000000000000..0c386a843bffb782eaa2586b5fe8f41bb4096198 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_array_api.py @@ -0,0 +1,575 @@ +"""Tools to support array_api.""" +import itertools +import math +from functools import wraps + +import numpy +import scipy.special as special + +from .._config import get_config +from .fixes import parse_version + + +def yield_namespace_device_dtype_combinations(): + """Yield supported namespace, device, dtype tuples for testing. + + Use this to test that an estimator works with all combinations. + + Returns + ------- + array_namespace : str + The name of the Array API namespace. + + device : str + The name of the device on which to allocate the arrays. Can be None to + indicate that the default value should be used. + + dtype_name : str + The name of the data type to use for arrays. Can be None to indicate + that the default value should be used. + """ + for array_namespace in [ + # The following is used to test the array_api_compat wrapper when + # array_api_dispatch is enabled: in particular, the arrays used in the + # tests are regular numpy arrays without any "device" attribute. + "numpy", + # Stricter NumPy-based Array API implementation. The + # numpy.array_api.Array instances always a dummy "device" attribute. + "numpy.array_api", + "cupy", + "cupy.array_api", + "torch", + ]: + if array_namespace == "torch": + for device, dtype in itertools.product( + ("cpu", "cuda"), ("float64", "float32") + ): + yield array_namespace, device, dtype + yield array_namespace, "mps", "float32" + else: + yield array_namespace, None, None + + +def _check_array_api_dispatch(array_api_dispatch): + """Check that array_api_compat is installed and NumPy version is compatible. + + array_api_compat follows NEP29, which has a higher minimum NumPy version than + scikit-learn. + """ + if array_api_dispatch: + try: + import array_api_compat # noqa + except ImportError: + raise ImportError( + "array_api_compat is required to dispatch arrays using the API" + " specification" + ) + + numpy_version = parse_version(numpy.__version__) + min_numpy_version = "1.21" + if numpy_version < parse_version(min_numpy_version): + raise ImportError( + f"NumPy must be {min_numpy_version} or newer to dispatch array using" + " the API specification" + ) + + +def device(x): + """Hardware device the array data resides on. + + Parameters + ---------- + x : array + Array instance from NumPy or an array API compatible library. + + Returns + ------- + out : device + `device` object (see the "Device Support" section of the array API spec). + """ + if isinstance(x, (numpy.ndarray, numpy.generic)): + return "cpu" + return x.device + + +def size(x): + """Return the total number of elements of x. + + Parameters + ---------- + x : array + Array instance from NumPy or an array API compatible library. + + Returns + ------- + out : int + Total number of elements. 
+ """ + return math.prod(x.shape) + + +def _is_numpy_namespace(xp): + """Return True if xp is backed by NumPy.""" + return xp.__name__ in {"numpy", "array_api_compat.numpy", "numpy.array_api"} + + +def _union1d(a, b, xp): + if _is_numpy_namespace(xp): + return xp.asarray(numpy.union1d(a, b)) + assert a.ndim == b.ndim == 1 + return xp.unique_values(xp.concat([xp.unique_values(a), xp.unique_values(b)])) + + +def isdtype(dtype, kind, *, xp): + """Returns a boolean indicating whether a provided dtype is of type "kind". + + Included in the v2022.12 of the Array API spec. + https://data-apis.org/array-api/latest/API_specification/generated/array_api.isdtype.html + """ + if isinstance(kind, tuple): + return any(_isdtype_single(dtype, k, xp=xp) for k in kind) + else: + return _isdtype_single(dtype, kind, xp=xp) + + +def _isdtype_single(dtype, kind, *, xp): + if isinstance(kind, str): + if kind == "bool": + return dtype == xp.bool + elif kind == "signed integer": + return dtype in {xp.int8, xp.int16, xp.int32, xp.int64} + elif kind == "unsigned integer": + return dtype in {xp.uint8, xp.uint16, xp.uint32, xp.uint64} + elif kind == "integral": + return any( + _isdtype_single(dtype, k, xp=xp) + for k in ("signed integer", "unsigned integer") + ) + elif kind == "real floating": + return dtype in supported_float_dtypes(xp) + elif kind == "complex floating": + # Some name spaces do not have complex, such as cupy.array_api + # and numpy.array_api + complex_dtypes = set() + if hasattr(xp, "complex64"): + complex_dtypes.add(xp.complex64) + if hasattr(xp, "complex128"): + complex_dtypes.add(xp.complex128) + return dtype in complex_dtypes + elif kind == "numeric": + return any( + _isdtype_single(dtype, k, xp=xp) + for k in ("integral", "real floating", "complex floating") + ) + else: + raise ValueError(f"Unrecognized data type kind: {kind!r}") + else: + return dtype == kind + + +def supported_float_dtypes(xp): + """Supported floating point types for the namespace + + Note: float16 is not officially part of the Array API spec at the + time of writing but scikit-learn estimators and functions can choose + to accept it when xp.float16 is defined. + + https://data-apis.org/array-api/latest/API_specification/data_types.html + """ + if hasattr(xp, "float16"): + return (xp.float64, xp.float32, xp.float16) + else: + return (xp.float64, xp.float32) + + +class _ArrayAPIWrapper: + """sklearn specific Array API compatibility wrapper + + This wrapper makes it possible for scikit-learn maintainers to + deal with discrepancies between different implementations of the + Python Array API standard and its evolution over time. 
+ + The Python Array API standard specification: + https://data-apis.org/array-api/latest/ + + Documentation of the NumPy implementation: + https://numpy.org/neps/nep-0047-array-api-standard.html + """ + + def __init__(self, array_namespace): + self._namespace = array_namespace + + def __getattr__(self, name): + return getattr(self._namespace, name) + + def __eq__(self, other): + return self._namespace == other._namespace + + def isdtype(self, dtype, kind): + return isdtype(dtype, kind, xp=self._namespace) + + +def _check_device_cpu(device): # noqa + if device not in {"cpu", None}: + raise ValueError(f"Unsupported device for NumPy: {device!r}") + + +def _accept_device_cpu(func): + @wraps(func) + def wrapped_func(*args, **kwargs): + _check_device_cpu(kwargs.pop("device", None)) + return func(*args, **kwargs) + + return wrapped_func + + +class _NumPyAPIWrapper: + """Array API compat wrapper for any numpy version + + NumPy < 1.22 does not expose the numpy.array_api namespace. This + wrapper makes it possible to write code that uses the standard + Array API while working with any version of NumPy supported by + scikit-learn. + + See the `get_namespace()` public function for more details. + """ + + # Creation functions in spec: + # https://data-apis.org/array-api/latest/API_specification/creation_functions.html + _CREATION_FUNCS = { + "arange", + "empty", + "empty_like", + "eye", + "full", + "full_like", + "linspace", + "ones", + "ones_like", + "zeros", + "zeros_like", + } + # Data types in spec + # https://data-apis.org/array-api/latest/API_specification/data_types.html + _DTYPES = { + "int8", + "int16", + "int32", + "int64", + "uint8", + "uint16", + "uint32", + "uint64", + # XXX: float16 is not part of the Array API spec but exposed by + # some namespaces. + "float16", + "float32", + "float64", + "complex64", + "complex128", + } + + def __getattr__(self, name): + attr = getattr(numpy, name) + + # Support device kwargs and make sure they are on the CPU + if name in self._CREATION_FUNCS: + return _accept_device_cpu(attr) + + # Convert to dtype objects + if name in self._DTYPES: + return numpy.dtype(attr) + return attr + + @property + def bool(self): + return numpy.bool_ + + def astype(self, x, dtype, *, copy=True, casting="unsafe"): + # astype is not defined in the top level NumPy namespace + return x.astype(dtype, copy=copy, casting=casting) + + def asarray(self, x, *, dtype=None, device=None, copy=None): # noqa + _check_device_cpu(device) + # Support copy in NumPy namespace + if copy is True: + return numpy.array(x, copy=True, dtype=dtype) + else: + return numpy.asarray(x, dtype=dtype) + + def unique_inverse(self, x): + return numpy.unique(x, return_inverse=True) + + def unique_counts(self, x): + return numpy.unique(x, return_counts=True) + + def unique_values(self, x): + return numpy.unique(x) + + def concat(self, arrays, *, axis=None): + return numpy.concatenate(arrays, axis=axis) + + def reshape(self, x, shape, *, copy=None): + """Gives a new shape to an array without changing its data. + + The Array API specification requires shape to be a tuple. 
https://data-apis.org/array-api/latest/API_specification/generated/array_api.reshape.html
+        """
+        if not isinstance(shape, tuple):
+            raise TypeError(
+                f"shape must be a tuple, got {shape!r} of type {type(shape)}"
+            )
+
+        if copy is True:
+            x = x.copy()
+        return numpy.reshape(x, shape)
+
+    def isdtype(self, dtype, kind):
+        return isdtype(dtype, kind, xp=self)
+
+
+_NUMPY_API_WRAPPER_INSTANCE = _NumPyAPIWrapper()
+
+
+def get_namespace(*arrays):
+    """Get namespace of arrays.
+
+    Introspect `arrays` arguments and return their common Array API
+    compatible namespace object, if any. NumPy 1.22 and later can
+    construct such containers using the `numpy.array_api` namespace
+    for instance.
+
+    See: https://numpy.org/neps/nep-0047-array-api-standard.html
+
+    If `arrays` are regular numpy arrays, an instance of the
+    `_NumPyAPIWrapper` compatibility wrapper is returned instead.
+
+    Namespace support is not enabled by default. To enable it, call:
+
+        sklearn.set_config(array_api_dispatch=True)
+
+    or:
+
+        with sklearn.config_context(array_api_dispatch=True):
+            # your code here
+
+    Otherwise an instance of the `_NumPyAPIWrapper` compatibility wrapper
+    is always returned, irrespective of whether the arrays implement the
+    `__array_namespace__` protocol or not.
+
+    Parameters
+    ----------
+    *arrays : array objects
+        Array objects.
+
+    Returns
+    -------
+    namespace : module
+        Namespace shared by array objects. If any of the `arrays` are not arrays,
+        the namespace defaults to NumPy.
+
+    is_array_api_compliant : bool
+        True if the arrays are containers that implement the Array API spec.
+        Always False when array_api_dispatch=False.
+    """
+    array_api_dispatch = get_config()["array_api_dispatch"]
+    if not array_api_dispatch:
+        return _NUMPY_API_WRAPPER_INSTANCE, False
+
+    _check_array_api_dispatch(array_api_dispatch)
+
+    # array-api-compat is a required dependency of scikit-learn only when
+    # configuring `array_api_dispatch=True`. Its import should therefore be
+    # protected by _check_array_api_dispatch to display an informative error
+    # message in case it is missing.
+    import array_api_compat
+
+    namespace, is_array_api_compliant = array_api_compat.get_namespace(*arrays), True
+
+    # These namespaces need additional wrapping to smooth out small differences
+    # between implementations
+    if namespace.__name__ in {"numpy.array_api", "cupy.array_api"}:
+        namespace = _ArrayAPIWrapper(namespace)
+
+    return namespace, is_array_api_compliant
+
+
+def _expit(X):
+    xp, _ = get_namespace(X)
+    if _is_numpy_namespace(xp):
+        return xp.asarray(special.expit(numpy.asarray(X)))
+
+    return 1.0 / (1.0 + xp.exp(-X))
+
+
+def _add_to_diagonal(array, value, xp):
+    # Workaround for the lack of support for xp.reshape(a, shape, copy=False) in
+    # numpy.array_api: https://github.com/numpy/numpy/issues/23410
+    value = xp.asarray(value, dtype=array.dtype)
+    if _is_numpy_namespace(xp):
+        array_np = numpy.asarray(array)
+        array_np.flat[:: array.shape[0] + 1] += value
+        return xp.asarray(array_np)
+    elif value.ndim == 1:
+        for i in range(array.shape[0]):
+            array[i, i] += value[i]
+    else:
+        # scalar value
+        for i in range(array.shape[0]):
+            array[i, i] += value
+
+
+def _weighted_sum(sample_score, sample_weight, normalize=False, xp=None):
+    # XXX: this function accepts Array API input but returns a Python scalar
+    # float. The call to float() is convenient because it removes the need to
+    # move back results from device to host memory (e.g. calling `.cpu()` on a
+    # torch tensor).
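+    # For example (illustrative), for NumPy inputs
+    # _weighted_sum(numpy.asarray([1.0, 2.0]), numpy.asarray([1.0, 3.0]), normalize=True)
+    # returns the Python float 1.75, i.e. (1*1 + 2*3) / 4.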
However, this might interact in unexpected ways (break?) + # with lazy Array API implementations. See: + # https://github.com/data-apis/array-api/issues/642 + if xp is None: + xp, _ = get_namespace(sample_score) + if normalize and _is_numpy_namespace(xp): + sample_score_np = numpy.asarray(sample_score) + if sample_weight is not None: + sample_weight_np = numpy.asarray(sample_weight) + else: + sample_weight_np = None + return float(numpy.average(sample_score_np, weights=sample_weight_np)) + + if not xp.isdtype(sample_score.dtype, "real floating"): + # We move to cpu device ahead of time since certain devices may not support + # float64, but we want the same precision for all devices and namespaces. + sample_score = xp.astype(xp.asarray(sample_score, device="cpu"), xp.float64) + + if sample_weight is not None: + sample_weight = xp.asarray( + sample_weight, dtype=sample_score.dtype, device=device(sample_score) + ) + if not xp.isdtype(sample_weight.dtype, "real floating"): + sample_weight = xp.astype(sample_weight, xp.float64) + + if normalize: + if sample_weight is not None: + scale = xp.sum(sample_weight) + else: + scale = sample_score.shape[0] + if scale != 0: + sample_score = sample_score / scale + + if sample_weight is not None: + return float(sample_score @ sample_weight) + else: + return float(xp.sum(sample_score)) + + +def _nanmin(X, axis=None): + # TODO: refactor once nan-aware reductions are standardized: + # https://github.com/data-apis/array-api/issues/621 + xp, _ = get_namespace(X) + if _is_numpy_namespace(xp): + return xp.asarray(numpy.nanmin(X, axis=axis)) + + else: + mask = xp.isnan(X) + X = xp.min(xp.where(mask, xp.asarray(+xp.inf, device=device(X)), X), axis=axis) + # Replace Infs from all NaN slices with NaN again + mask = xp.all(mask, axis=axis) + if xp.any(mask): + X = xp.where(mask, xp.asarray(xp.nan), X) + return X + + +def _nanmax(X, axis=None): + # TODO: refactor once nan-aware reductions are standardized: + # https://github.com/data-apis/array-api/issues/621 + xp, _ = get_namespace(X) + if _is_numpy_namespace(xp): + return xp.asarray(numpy.nanmax(X, axis=axis)) + + else: + mask = xp.isnan(X) + X = xp.max(xp.where(mask, xp.asarray(-xp.inf, device=device(X)), X), axis=axis) + # Replace Infs from all NaN slices with NaN again + mask = xp.all(mask, axis=axis) + if xp.any(mask): + X = xp.where(mask, xp.asarray(xp.nan), X) + return X + + +def _asarray_with_order(array, dtype=None, order=None, copy=None, *, xp=None): + """Helper to support the order kwarg only for NumPy-backed arrays + + Memory layout parameter `order` is not exposed in the Array API standard, + however some input validation code in scikit-learn needs to work both + for classes and functions that will leverage Array API only operations + and for code that inherently relies on NumPy backed data containers with + specific memory layout constraints (e.g. our own Cython code). The + purpose of this helper is to make it possible to share code for data + container validation without memory copies for both downstream use cases: + the `order` parameter is only enforced if the input array implementation + is NumPy based, otherwise `order` is just silently ignored. + """ + if xp is None: + xp, _ = get_namespace(array) + if _is_numpy_namespace(xp): + # Use NumPy API to support order + if copy is True: + array = numpy.array(array, order=order, dtype=dtype) + else: + array = numpy.asarray(array, order=order, dtype=dtype) + + # At this point array is a NumPy ndarray. 
We convert it to an array
+        # container that is consistent with the input's namespace.
+        return xp.asarray(array)
+    else:
+        return xp.asarray(array, dtype=dtype, copy=copy)
+
+
+def _convert_to_numpy(array, xp):
+    """Convert X into a NumPy ndarray on the CPU."""
+    xp_name = xp.__name__
+
+    if xp_name in {"array_api_compat.torch", "torch"}:
+        return array.cpu().numpy()
+    elif xp_name == "cupy.array_api":
+        return array._array.get()
+    elif xp_name in {"array_api_compat.cupy", "cupy"}:  # pragma: nocover
+        return array.get()
+
+    return numpy.asarray(array)
+
+
+def _estimator_with_converted_arrays(estimator, converter):
+    """Create a new estimator with all array attributes converted.
+
+    The converter is called on all NumPy arrays and on arrays that support
+    the DLPack interface.
+
+    Parameters
+    ----------
+    estimator : Estimator
+        Estimator to convert.
+
+    converter : callable
+        Callable that takes an array attribute and returns the converted array.
+
+    Returns
+    -------
+    new_estimator : Estimator
+        Converted estimator.
+    """
+    from sklearn.base import clone
+
+    new_estimator = clone(estimator)
+    for key, attribute in vars(estimator).items():
+        if hasattr(attribute, "__dlpack__") or isinstance(attribute, numpy.ndarray):
+            attribute = converter(attribute)
+        setattr(new_estimator, key, attribute)
+    return new_estimator
+
+
+def _atol_for_type(dtype):
+    """Return the absolute tolerance for a given dtype."""
+    return numpy.finfo(dtype).eps * 100
diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_available_if.py b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_available_if.py
new file mode 100644
index 0000000000000000000000000000000000000000..2d9598df9de7e8e1c0d85640f278b5e669302094
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_available_if.py
@@ -0,0 +1,93 @@
+from functools import update_wrapper, wraps
+from types import MethodType
+
+
+class _AvailableIfDescriptor:
+    """Implements a conditional property using the descriptor protocol.
+
+    Using this class to create a decorator will raise an ``AttributeError``
+    if check(self) returns a falsy value. Note that if check raises an error,
+    this will also result in ``hasattr`` returning False.
+
+    See https://docs.python.org/3/howto/descriptor.html for an explanation of
+    descriptors.
+    """
+
+    def __init__(self, fn, check, attribute_name):
+        self.fn = fn
+        self.check = check
+        self.attribute_name = attribute_name
+
+        # update the docstring of the descriptor
+        update_wrapper(self, fn)
+
+    def _check(self, obj, owner):
+        attr_err_msg = (
+            f"This {repr(owner.__name__)} has no attribute {repr(self.attribute_name)}"
+        )
+        try:
+            check_result = self.check(obj)
+        except Exception as e:
+            raise AttributeError(attr_err_msg) from e
+
+        if not check_result:
+            raise AttributeError(attr_err_msg)
+
+    def __get__(self, obj, owner=None):
+        if obj is not None:
+            # delegate only on instances, not the classes.
+            # this is to allow access to the docstrings.
+            self._check(obj, owner=owner)
+            out = MethodType(self.fn, obj)
+
+        else:
+            # This makes it possible to use the decorated method as an unbound
+            # method, for instance when monkeypatching.
+            @wraps(self.fn)
+            def out(*args, **kwargs):
+                self._check(args[0], owner=owner)
+                return self.fn(*args, **kwargs)
+
+        return out
+
+
+def available_if(check):
+    """An attribute that is available only if check returns a truthy value.
+ + Parameters + ---------- + check : callable + When passed the object with the decorated method, this should return + a truthy value if the attribute is available, and either return False + or raise an AttributeError if not available. + + Returns + ------- + callable + Callable makes the decorated method available if `check` returns + a truthy value, otherwise the decorated method is unavailable. + + Examples + -------- + >>> from sklearn.utils.metaestimators import available_if + >>> class HelloIfEven: + ... def __init__(self, x): + ... self.x = x + ... + ... def _x_is_even(self): + ... return self.x % 2 == 0 + ... + ... @available_if(_x_is_even) + ... def say_hello(self): + ... print("Hello") + ... + >>> obj = HelloIfEven(1) + >>> hasattr(obj, "say_hello") + False + >>> obj.x = 2 + >>> hasattr(obj, "say_hello") + True + >>> obj.say_hello() + Hello + """ + return lambda fn: _AvailableIfDescriptor(fn, check, attribute_name=fn.__name__) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_bunch.py b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_bunch.py new file mode 100644 index 0000000000000000000000000000000000000000..d90aeb7d93c74d483254901d51da9d82d39cfe6a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_bunch.py @@ -0,0 +1,67 @@ +import warnings + + +class Bunch(dict): + """Container object exposing keys as attributes. + + Bunch objects are sometimes used as an output for functions and methods. + They extend dictionaries by enabling values to be accessed by key, + `bunch["value_key"]`, or by an attribute, `bunch.value_key`. + + Examples + -------- + >>> from sklearn.utils import Bunch + >>> b = Bunch(a=1, b=2) + >>> b['b'] + 2 + >>> b.b + 2 + >>> b.a = 3 + >>> b['a'] + 3 + >>> b.c = 6 + >>> b['c'] + 6 + """ + + def __init__(self, **kwargs): + super().__init__(kwargs) + + # Map from deprecated key to warning message + self.__dict__["_deprecated_key_to_warnings"] = {} + + def __getitem__(self, key): + if key in self.__dict__.get("_deprecated_key_to_warnings", {}): + warnings.warn( + self._deprecated_key_to_warnings[key], + FutureWarning, + ) + return super().__getitem__(key) + + def _set_deprecated(self, value, *, new_key, deprecated_key, warning_message): + """Set key in dictionary to be deprecated with its warning message.""" + self.__dict__["_deprecated_key_to_warnings"][deprecated_key] = warning_message + self[new_key] = self[deprecated_key] = value + + def __setattr__(self, key, value): + self[key] = value + + def __dir__(self): + return self.keys() + + def __getattr__(self, key): + try: + return self[key] + except KeyError: + raise AttributeError(key) + + def __setstate__(self, state): + # Bunch pickles generated with scikit-learn 0.16.* have an non + # empty __dict__. This causes a surprising behaviour when + # loading these pickles scikit-learn 0.17: reading bunch.key + # uses __dict__ but assigning to bunch.key use __setattr__ and + # only changes bunch['key']. More details can be found at: + # https://github.com/scikit-learn/scikit-learn/issues/6196. 
+ # Overriding __setstate__ to be a noop has the effect of + # ignoring the pickled __dict__ + pass diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_cython_blas.pxd b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_cython_blas.pxd new file mode 100644 index 0000000000000000000000000000000000000000..1187eb49d25d4248cda1cc7d06f62583b1761f49 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_cython_blas.pxd @@ -0,0 +1,41 @@ +from cython cimport floating + + +cpdef enum BLAS_Order: + RowMajor # C contiguous + ColMajor # Fortran contiguous + + +cpdef enum BLAS_Trans: + NoTrans = 110 # correspond to 'n' + Trans = 116 # correspond to 't' + + +# BLAS Level 1 ################################################################ +cdef floating _dot(int, const floating*, int, const floating*, int) noexcept nogil + +cdef floating _asum(int, const floating*, int) noexcept nogil + +cdef void _axpy(int, floating, const floating*, int, floating*, int) noexcept nogil + +cdef floating _nrm2(int, const floating*, int) noexcept nogil + +cdef void _copy(int, const floating*, int, const floating*, int) noexcept nogil + +cdef void _scal(int, floating, const floating*, int) noexcept nogil + +cdef void _rotg(floating*, floating*, floating*, floating*) noexcept nogil + +cdef void _rot(int, floating*, int, floating*, int, floating, floating) noexcept nogil + +# BLAS Level 2 ################################################################ +cdef void _gemv(BLAS_Order, BLAS_Trans, int, int, floating, const floating*, int, + const floating*, int, floating, floating*, int) noexcept nogil + +cdef void _ger(BLAS_Order, int, int, floating, const floating*, int, const floating*, + int, floating*, int) noexcept nogil + +# BLASLevel 3 ################################################################ +cdef void _gemm(BLAS_Order, BLAS_Trans, BLAS_Trans, int, int, int, floating, + const floating*, int, const floating*, int, floating, floating*, + int) noexcept nogil diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_estimator_html_repr.css b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_estimator_html_repr.css new file mode 100644 index 0000000000000000000000000000000000000000..3f29c70eddefc51c25d30d4f5472e3b848d60632 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_estimator_html_repr.css @@ -0,0 +1,404 @@ +#$id { + /* Definition of color scheme common for light and dark mode */ + --sklearn-color-text: black; + --sklearn-color-line: gray; + /* Definition of color scheme for unfitted estimators */ + --sklearn-color-unfitted-level-0: #fff5e6; + --sklearn-color-unfitted-level-1: #f6e4d2; + --sklearn-color-unfitted-level-2: #ffe0b3; + --sklearn-color-unfitted-level-3: chocolate; + /* Definition of color scheme for fitted estimators */ + --sklearn-color-fitted-level-0: #f0f8ff; + --sklearn-color-fitted-level-1: #d4ebff; + --sklearn-color-fitted-level-2: #b3dbfd; + --sklearn-color-fitted-level-3: cornflowerblue; + + /* Specific color for light theme */ + --sklearn-color-text-on-default-background: var(--sg-text-color, var(--theme-code-foreground, var(--jp-content-font-color1, black))); + --sklearn-color-background: var(--sg-background-color, var(--theme-background, var(--jp-layout-color0, white))); + --sklearn-color-border-box: var(--sg-text-color, var(--theme-code-foreground, var(--jp-content-font-color1, black))); + --sklearn-color-icon: #696969; + + @media (prefers-color-scheme: dark) { + /* Redefinition of color scheme for dark 
theme */ + --sklearn-color-text-on-default-background: var(--sg-text-color, var(--theme-code-foreground, var(--jp-content-font-color1, white))); + --sklearn-color-background: var(--sg-background-color, var(--theme-background, var(--jp-layout-color0, #111))); + --sklearn-color-border-box: var(--sg-text-color, var(--theme-code-foreground, var(--jp-content-font-color1, white))); + --sklearn-color-icon: #878787; + } +} + +#$id { + color: var(--sklearn-color-text); +} + +#$id pre { + padding: 0; +} + +#$id input.sk-hidden--visually { + border: 0; + clip: rect(1px 1px 1px 1px); + clip: rect(1px, 1px, 1px, 1px); + height: 1px; + margin: -1px; + overflow: hidden; + padding: 0; + position: absolute; + width: 1px; +} + +#$id div.sk-dashed-wrapped { + border: 1px dashed var(--sklearn-color-line); + margin: 0 0.4em 0.5em 0.4em; + box-sizing: border-box; + padding-bottom: 0.4em; + background-color: var(--sklearn-color-background); +} + +#$id div.sk-container { + /* jupyter's `normalize.less` sets `[hidden] { display: none; }` + but bootstrap.min.css set `[hidden] { display: none !important; }` + so we also need the `!important` here to be able to override the + default hidden behavior on the sphinx rendered scikit-learn.org. + See: https://github.com/scikit-learn/scikit-learn/issues/21755 */ + display: inline-block !important; + position: relative; +} + +#$id div.sk-text-repr-fallback { + display: none; +} + +div.sk-parallel-item, +div.sk-serial, +div.sk-item { + /* draw centered vertical line to link estimators */ + background-image: linear-gradient(var(--sklearn-color-text-on-default-background), var(--sklearn-color-text-on-default-background)); + background-size: 2px 100%; + background-repeat: no-repeat; + background-position: center center; +} + +/* Parallel-specific style estimator block */ + +#$id div.sk-parallel-item::after { + content: ""; + width: 100%; + border-bottom: 2px solid var(--sklearn-color-text-on-default-background); + flex-grow: 1; +} + +#$id div.sk-parallel { + display: flex; + align-items: stretch; + justify-content: center; + background-color: var(--sklearn-color-background); + position: relative; +} + +#$id div.sk-parallel-item { + display: flex; + flex-direction: column; +} + +#$id div.sk-parallel-item:first-child::after { + align-self: flex-end; + width: 50%; +} + +#$id div.sk-parallel-item:last-child::after { + align-self: flex-start; + width: 50%; +} + +#$id div.sk-parallel-item:only-child::after { + width: 0; +} + +/* Serial-specific style estimator block */ + +#$id div.sk-serial { + display: flex; + flex-direction: column; + align-items: center; + background-color: var(--sklearn-color-background); + padding-right: 1em; + padding-left: 1em; +} + + +/* Toggleable style: style used for estimator/Pipeline/ColumnTransformer box that is +clickable and can be expanded/collapsed. +- Pipeline and ColumnTransformer use this feature and define the default style +- Estimators will overwrite some part of the style using the `sk-estimator` class +*/ + +/* Pipeline and ColumnTransformer style (default) */ + +#$id div.sk-toggleable { + /* Default theme specific background. 
It is overwritten whether we have a + specific estimator or a Pipeline/ColumnTransformer */ + background-color: var(--sklearn-color-background); +} + +/* Toggleable label */ +#$id label.sk-toggleable__label { + cursor: pointer; + display: block; + width: 100%; + margin-bottom: 0; + padding: 0.5em; + box-sizing: border-box; + text-align: center; +} + +#$id label.sk-toggleable__label-arrow:before { + /* Arrow on the left of the label */ + content: "▸"; + float: left; + margin-right: 0.25em; + color: var(--sklearn-color-icon); +} + +#$id label.sk-toggleable__label-arrow:hover:before { + color: var(--sklearn-color-text); +} + +/* Toggleable content - dropdown */ + +#$id div.sk-toggleable__content { + max-height: 0; + max-width: 0; + overflow: hidden; + text-align: left; + /* unfitted */ + background-color: var(--sklearn-color-unfitted-level-0); +} + +#$id div.sk-toggleable__content.fitted { + /* fitted */ + background-color: var(--sklearn-color-fitted-level-0); +} + +#$id div.sk-toggleable__content pre { + margin: 0.2em; + border-radius: 0.25em; + color: var(--sklearn-color-text); + /* unfitted */ + background-color: var(--sklearn-color-unfitted-level-0); +} + +#$id div.sk-toggleable__content.fitted pre { + /* unfitted */ + background-color: var(--sklearn-color-fitted-level-0); +} + +#$id input.sk-toggleable__control:checked~div.sk-toggleable__content { + /* Expand drop-down */ + max-height: 200px; + max-width: 100%; + overflow: auto; +} + +#$id input.sk-toggleable__control:checked~label.sk-toggleable__label-arrow:before { + content: "▾"; +} + +/* Pipeline/ColumnTransformer-specific style */ + +#$id div.sk-label input.sk-toggleable__control:checked~label.sk-toggleable__label { + color: var(--sklearn-color-text); + background-color: var(--sklearn-color-unfitted-level-2); +} + +#$id div.sk-label.fitted input.sk-toggleable__control:checked~label.sk-toggleable__label { + background-color: var(--sklearn-color-fitted-level-2); +} + +/* Estimator-specific style */ + +/* Colorize estimator box */ +#$id div.sk-estimator input.sk-toggleable__control:checked~label.sk-toggleable__label { + /* unfitted */ + background-color: var(--sklearn-color-unfitted-level-2); +} + +#$id div.sk-estimator.fitted input.sk-toggleable__control:checked~label.sk-toggleable__label { + /* fitted */ + background-color: var(--sklearn-color-fitted-level-2); +} + +#$id div.sk-label label.sk-toggleable__label, +#$id div.sk-label label { + /* The background is the default theme color */ + color: var(--sklearn-color-text-on-default-background); +} + +/* On hover, darken the color of the background */ +#$id div.sk-label:hover label.sk-toggleable__label { + color: var(--sklearn-color-text); + background-color: var(--sklearn-color-unfitted-level-2); +} + +/* Label box, darken color on hover, fitted */ +#$id div.sk-label.fitted:hover label.sk-toggleable__label.fitted { + color: var(--sklearn-color-text); + background-color: var(--sklearn-color-fitted-level-2); +} + +/* Estimator label */ + +#$id div.sk-label label { + font-family: monospace; + font-weight: bold; + display: inline-block; + line-height: 1.2em; +} + +#$id div.sk-label-container { + text-align: center; +} + +/* Estimator-specific */ +#$id div.sk-estimator { + font-family: monospace; + border: 1px dotted var(--sklearn-color-border-box); + border-radius: 0.25em; + box-sizing: border-box; + margin-bottom: 0.5em; + /* unfitted */ + background-color: var(--sklearn-color-unfitted-level-0); +} + +#$id div.sk-estimator.fitted { + /* fitted */ + background-color: 
var(--sklearn-color-fitted-level-0); +} + +/* on hover */ +#$id div.sk-estimator:hover { + /* unfitted */ + background-color: var(--sklearn-color-unfitted-level-2); +} + +#$id div.sk-estimator.fitted:hover { + /* fitted */ + background-color: var(--sklearn-color-fitted-level-2); +} + +/* Specification for estimator info (e.g. "i" and "?") */ + +/* Common style for "i" and "?" */ + +.sk-estimator-doc-link, +a:link.sk-estimator-doc-link, +a:visited.sk-estimator-doc-link { + float: right; + font-size: smaller; + line-height: 1em; + font-family: monospace; + background-color: var(--sklearn-color-background); + border-radius: 1em; + height: 1em; + width: 1em; + text-decoration: none !important; + margin-left: 1ex; + /* unfitted */ + border: var(--sklearn-color-unfitted-level-1) 1pt solid; + color: var(--sklearn-color-unfitted-level-1); +} + +.sk-estimator-doc-link.fitted, +a:link.sk-estimator-doc-link.fitted, +a:visited.sk-estimator-doc-link.fitted { + /* fitted */ + border: var(--sklearn-color-fitted-level-1) 1pt solid; + color: var(--sklearn-color-fitted-level-1); +} + +/* On hover */ +div.sk-estimator:hover .sk-estimator-doc-link:hover, +.sk-estimator-doc-link:hover, +div.sk-label-container:hover .sk-estimator-doc-link:hover, +.sk-estimator-doc-link:hover { + /* unfitted */ + background-color: var(--sklearn-color-unfitted-level-3); + color: var(--sklearn-color-background); + text-decoration: none; +} + +div.sk-estimator.fitted:hover .sk-estimator-doc-link.fitted:hover, +.sk-estimator-doc-link.fitted:hover, +div.sk-label-container:hover .sk-estimator-doc-link.fitted:hover, +.sk-estimator-doc-link.fitted:hover { + /* fitted */ + background-color: var(--sklearn-color-fitted-level-3); + color: var(--sklearn-color-background); + text-decoration: none; +} + +/* Span, style for the box shown on hovering the info icon */ +.sk-estimator-doc-link span { + display: none; + z-index: 9999; + position: relative; + font-weight: normal; + right: .2ex; + padding: .5ex; + margin: .5ex; + width: min-content; + min-width: 20ex; + max-width: 50ex; + color: var(--sklearn-color-text); + box-shadow: 2pt 2pt 4pt #999; + /* unfitted */ + background: var(--sklearn-color-unfitted-level-0); + border: .5pt solid var(--sklearn-color-unfitted-level-3); +} + +.sk-estimator-doc-link.fitted span { + /* fitted */ + background: var(--sklearn-color-fitted-level-0); + border: var(--sklearn-color-fitted-level-3); +} + +.sk-estimator-doc-link:hover span { + display: block; +} + +/* "?"-specific style due to the `` HTML tag */ + +#$id a.estimator_doc_link { + float: right; + font-size: 1rem; + line-height: 1em; + font-family: monospace; + background-color: var(--sklearn-color-background); + border-radius: 1rem; + height: 1rem; + width: 1rem; + text-decoration: none; + /* unfitted */ + color: var(--sklearn-color-unfitted-level-1); + border: var(--sklearn-color-unfitted-level-1) 1pt solid; +} + +#$id a.estimator_doc_link.fitted { + /* fitted */ + border: var(--sklearn-color-fitted-level-1) 1pt solid; + color: var(--sklearn-color-fitted-level-1); +} + +/* On hover */ +#$id a.estimator_doc_link:hover { + /* unfitted */ + background-color: var(--sklearn-color-unfitted-level-3); + color: var(--sklearn-color-background); + text-decoration: none; +} + +#$id a.estimator_doc_link.fitted:hover { + /* fitted */ + background-color: var(--sklearn-color-fitted-level-3); +} diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_fast_dict.pxd b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_fast_dict.pxd new file mode 100644 
index 0000000000000000000000000000000000000000..4a9d6ef4eb7b74a7cba19ec5d62ccc748dbeb768 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_fast_dict.pxd @@ -0,0 +1,18 @@ +# Author: Gael Varoquaux +# License: BSD +""" +Uses C++ map containers for fast dict-like behavior with keys being +integers, and values float. +""" + +from libcpp.map cimport map as cpp_map + +from ._typedefs cimport float64_t, intp_t + + +############################################################################### +# An object to be used in Python + +cdef class IntFloatDict: + cdef cpp_map[intp_t, float64_t] my_map + cdef _to_arrays(self, intp_t [:] keys, float64_t [:] values) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_heap.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_heap.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..951995a44e1ca06d3af4db07d35737112f2f2fc0 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_heap.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_heap.pxd b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_heap.pxd new file mode 100644 index 0000000000000000000000000000000000000000..39de4dc02d315f8decd1fa06f430759eaa57e68e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_heap.pxd @@ -0,0 +1,14 @@ +# Heap routines, used in various Cython implementations. + +from cython cimport floating + +from ._typedefs cimport intp_t + + +cdef int heap_push( + floating* values, + intp_t* indices, + intp_t size, + floating val, + intp_t val_idx, +) noexcept nogil diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_mask.py b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_mask.py new file mode 100644 index 0000000000000000000000000000000000000000..07332bf1edbd4a21b14c974937ba80ff3cbad13f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_mask.py @@ -0,0 +1,63 @@ +from contextlib import suppress + +import numpy as np +from scipy import sparse as sp + +from . import is_scalar_nan +from .fixes import _object_dtype_isnan + + +def _get_dense_mask(X, value_to_mask): + with suppress(ImportError, AttributeError): + # We also suppress `AttributeError` because older versions of pandas do + # not have `NA`. + import pandas + + if value_to_mask is pandas.NA: + return pandas.isna(X) + + if is_scalar_nan(value_to_mask): + if X.dtype.kind == "f": + Xt = np.isnan(X) + elif X.dtype.kind in ("i", "u"): + # can't have NaNs in integer array. + Xt = np.zeros(X.shape, dtype=bool) + else: + # np.isnan does not work on object dtypes. + Xt = _object_dtype_isnan(X) + else: + Xt = X == value_to_mask + + return Xt + + +def _get_mask(X, value_to_mask): + """Compute the boolean mask X == value_to_mask. + + Parameters + ---------- + X : {ndarray, sparse matrix} of shape (n_samples, n_features) + Input data, where ``n_samples`` is the number of samples and + ``n_features`` is the number of features. + + value_to_mask : {int, float} + The value which is to be masked in X. + + Returns + ------- + X_mask : {ndarray, sparse matrix} of shape (n_samples, n_features) + Missing mask. 
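+
+    Examples
+    --------
+    A minimal sketch with a dense array (illustrative only):
+
+    >>> import numpy as np
+    >>> _get_mask(np.array([[1.0, np.nan, 3.0]]), np.nan)
+    array([[False,  True, False]])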
+ """ + if not sp.issparse(X): + # For all cases apart of a sparse input where we need to reconstruct + # a sparse output + return _get_dense_mask(X, value_to_mask) + + Xt = _get_dense_mask(X.data, value_to_mask) + + sparse_constructor = sp.csr_matrix if X.format == "csr" else sp.csc_matrix + Xt_sparse = sparse_constructor( + (Xt, X.indices.copy(), X.indptr.copy()), shape=X.shape, dtype=bool + ) + + return Xt_sparse diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_metadata_requests.py b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_metadata_requests.py new file mode 100644 index 0000000000000000000000000000000000000000..8b99012d7b0fbc7759f3f50d746d96aa355b757d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_metadata_requests.py @@ -0,0 +1,1563 @@ +""" +Metadata Routing Utility + +In order to better understand the components implemented in this file, one +needs to understand their relationship to one another. + +The only relevant public API for end users are the ``set_{method}_request``, +e.g. ``estimator.set_fit_request(sample_weight=True)``. However, third-party +developers and users who implement custom meta-estimators, need to deal with +the objects implemented in this file. + +All estimators (should) implement a ``get_metadata_routing`` method, returning +the routing requests set for the estimator. This method is automatically +implemented via ``BaseEstimator`` for all simple estimators, but needs a custom +implementation for meta-estimators. + +In non-routing consumers, i.e. the simplest case, e.g. ``SVM``, +``get_metadata_routing`` returns a ``MetadataRequest`` object. + +In routers, e.g. meta-estimators and a multi metric scorer, +``get_metadata_routing`` returns a ``MetadataRouter`` object. + +An object which is both a router and a consumer, e.g. a meta-estimator which +consumes ``sample_weight`` and routes ``sample_weight`` to its sub-estimators, +routing information includes both information about the object itself (added +via ``MetadataRouter.add_self_request``), as well as the routing information +for its sub-estimators. + +A ``MetadataRequest`` instance includes one ``MethodMetadataRequest`` per +method in ``METHODS``, which includes ``fit``, ``score``, etc. + +Request values are added to the routing mechanism by adding them to +``MethodMetadataRequest`` instances, e.g. +``metadatarequest.fit.add(param="sample_weight", alias="my_weights")``. This is +used in ``set_{method}_request`` which are automatically generated, so users +and developers almost never need to directly call methods on a +``MethodMetadataRequest``. + +The ``alias`` above in the ``add`` method has to be either a string (an alias), +or a {True (requested), False (unrequested), None (error if passed)}``. There +are some other special values such as ``UNUSED`` and ``WARN`` which are used +for purposes such as warning of removing a metadata in a child class, but not +used by the end users. + +``MetadataRouter`` includes information about sub-objects' routing and how +methods are mapped together. For instance, the information about which methods +of a sub-estimator are called in which methods of the meta-estimator are all +stored here. Conceptually, this information looks like: + +``` +{ + "sub_estimator1": ( + mapping=[(caller="fit", callee="transform"), ...], + router=MetadataRequest(...), # or another MetadataRouter + ), + ... 
+} +``` + +To give the above representation some structure, we use the following objects: + +- ``(caller, callee)`` is a namedtuple called ``MethodPair`` + +- The list of ``MethodPair`` stored in the ``mapping`` field is a + ``MethodMapping`` object + +- ``(mapping=..., router=...)`` is a namedtuple called ``RouterMappingPair`` + +The ``set_{method}_request`` methods are dynamically generated for estimators +which inherit from the ``BaseEstimator``. This is done by attaching instances +of the ``RequestMethod`` descriptor to classes, which is done in the +``_MetadataRequester`` class, and ``BaseEstimator`` inherits from this mixin. +This mixin also implements the ``get_metadata_routing``, which meta-estimators +need to override, but it works for simple consumers as is. +""" + +# Author: Adrin Jalali +# License: BSD 3 clause + +import inspect +from collections import namedtuple +from copy import deepcopy +from typing import TYPE_CHECKING, Optional, Union +from warnings import warn + +from .. import get_config +from ..exceptions import UnsetMetadataPassedError +from ._bunch import Bunch + +# Only the following methods are supported in the routing mechanism. Adding new +# methods at the moment involves monkeypatching this list. +# Note that if this list is changed or monkeypatched, the corresponding method +# needs to be added under a TYPE_CHECKING condition like the one done here in +# _MetadataRequester +SIMPLE_METHODS = [ + "fit", + "partial_fit", + "predict", + "predict_proba", + "predict_log_proba", + "decision_function", + "score", + "split", + "transform", + "inverse_transform", +] + +# These methods are a composite of other methods and one cannot set their +# requests directly. Instead they should be set by setting the requests of the +# simple methods which make the composite ones. +COMPOSITE_METHODS = { + "fit_transform": ["fit", "transform"], + "fit_predict": ["fit", "predict"], +} + +METHODS = SIMPLE_METHODS + list(COMPOSITE_METHODS.keys()) + + +def _routing_enabled(): + """Return whether metadata routing is enabled. + + .. versionadded:: 1.3 + + Returns + ------- + enabled : bool + Whether metadata routing is enabled. If the config is not set, it + defaults to False. + """ + return get_config().get("enable_metadata_routing", False) + + +def _raise_for_params(params, owner, method): + """Raise an error if metadata routing is not enabled and params are passed. + + .. versionadded:: 1.4 + + Parameters + ---------- + params : dict + The metadata passed to a method. + + owner : object + The object to which the method belongs. + + method : str + The name of the method, e.g. "fit". + + Raises + ------ + ValueError + If metadata routing is not enabled and params are passed. + """ + caller = ( + f"{owner.__class__.__name__}.{method}" if method else owner.__class__.__name__ + ) + if not _routing_enabled() and params: + raise ValueError( + f"Passing extra keyword arguments to {caller} is only supported if" + " enable_metadata_routing=True, which you can set using" + " `sklearn.set_config`. See the User Guide" + " for more" + f" details. Extra parameters passed are: {set(params)}" + ) + + +def _raise_for_unsupported_routing(obj, method, **kwargs): + """Raise when metadata routing is enabled and metadata is passed. + + This is used in meta-estimators which have not implemented metadata routing + to prevent silent bugs. 
There is no need to use this function if the + meta-estimator is not accepting any metadata, especially in `fit`, since + if a meta-estimator accepts any metadata, they would do that in `fit` as + well. + + Parameters + ---------- + obj : estimator + The estimator for which we're raising the error. + + method : str + The method where the error is raised. + + **kwargs : dict + The metadata passed to the method. + """ + kwargs = {key: value for key, value in kwargs.items() if value is not None} + if _routing_enabled() and kwargs: + cls_name = obj.__class__.__name__ + raise NotImplementedError( + f"{cls_name}.{method} cannot accept given metadata ({set(kwargs.keys())})" + f" since metadata routing is not yet implemented for {cls_name}." + ) + + +class _RoutingNotSupportedMixin: + """A mixin to be used to remove the default `get_metadata_routing`. + + This is used in meta-estimators where metadata routing is not yet + implemented. + + This also makes it clear in our rendered documentation that this method + cannot be used. + """ + + def get_metadata_routing(self): + """Raise `NotImplementedError`. + + This estimator does not support metadata routing yet.""" + raise NotImplementedError( + f"{self.__class__.__name__} has not implemented metadata routing yet." + ) + + +# Request values +# ============== +# Each request value needs to be one of the following values, or an alias. + +# this is used in `__metadata_request__*` attributes to indicate that a +# metadata is not present even though it may be present in the +# corresponding method's signature. +UNUSED = "$UNUSED$" + +# this is used whenever a default value is changed, and therefore the user +# should explicitly set the value, otherwise a warning is shown. An example +# is when a meta-estimator is only a router, but then becomes also a +# consumer in a new release. +WARN = "$WARN$" + +# this is the default used in `set_{method}_request` methods to indicate no +# change requested by the user. +UNCHANGED = "$UNCHANGED$" + +VALID_REQUEST_VALUES = [False, True, None, UNUSED, WARN] + + +def request_is_alias(item): + """Check if an item is a valid alias. + + Values in ``VALID_REQUEST_VALUES`` are not considered aliases in this + context. Only a string which is a valid identifier is. + + Parameters + ---------- + item : object + The given item to be checked if it can be an alias. + + Returns + ------- + result : bool + Whether the given item is a valid alias. + """ + if item in VALID_REQUEST_VALUES: + return False + + # item is only an alias if it's a valid identifier + return isinstance(item, str) and item.isidentifier() + + +def request_is_valid(item): + """Check if an item is a valid request value (and not an alias). + + Parameters + ---------- + item : object + The given item to be checked. + + Returns + ------- + result : bool + Whether the given item is valid. + """ + return item in VALID_REQUEST_VALUES + + +# Metadata Request for Simple Consumers +# ===================================== +# This section includes MethodMetadataRequest and MetadataRequest which are +# used in simple consumers. + + +class MethodMetadataRequest: + """A prescription of how metadata is to be passed to a single method. + + Refer to :class:`MetadataRequest` for how this class is used. + + .. versionadded:: 1.3 + + Parameters + ---------- + owner : str + A display name for the object owning these requests. + + method : str + The name of the method to which these requests belong. 
+ + requests : dict of {str: bool, None or str}, default=None + The initial requests for this method. + """ + + def __init__(self, owner, method, requests=None): + self._requests = requests or dict() + self.owner = owner + self.method = method + + @property + def requests(self): + """Dictionary of the form: ``{key: alias}``.""" + return self._requests + + def add_request( + self, + *, + param, + alias, + ): + """Add request info for a metadata. + + Parameters + ---------- + param : str + The property for which a request is set. + + alias : str, or {True, False, None} + Specifies which metadata should be routed to `param` + + - str: the name (or alias) of metadata given to a meta-estimator that + should be routed to this parameter. + + - True: requested + + - False: not requested + + - None: error if passed + """ + if not request_is_alias(alias) and not request_is_valid(alias): + raise ValueError( + f"The alias you're setting for `{param}` should be either a " + "valid identifier or one of {None, True, False}, but given " + f"value is: `{alias}`" + ) + + if alias == param: + alias = True + + if alias == UNUSED: + if param in self._requests: + del self._requests[param] + else: + raise ValueError( + f"Trying to remove parameter {param} with UNUSED which doesn't" + " exist." + ) + else: + self._requests[param] = alias + + return self + + def _get_param_names(self, return_alias): + """Get names of all metadata that can be consumed or routed by this method. + + This method returns the names of all metadata, even the ``False`` + ones. + + Parameters + ---------- + return_alias : bool + Controls whether original or aliased names should be returned. If + ``False``, aliases are ignored and original names are returned. + + Returns + ------- + names : set of str + A set of strings with the names of all parameters. + """ + return set( + alias if return_alias and not request_is_valid(alias) else prop + for prop, alias in self._requests.items() + if not request_is_valid(alias) or alias is not False + ) + + def _check_warnings(self, *, params): + """Check whether metadata is passed which is marked as WARN. + + If any metadata is passed which is marked as WARN, a warning is raised. + + Parameters + ---------- + params : dict + The metadata passed to a method. + """ + params = {} if params is None else params + warn_params = { + prop + for prop, alias in self._requests.items() + if alias == WARN and prop in params + } + for param in warn_params: + warn( + f"Support for {param} has recently been added to this class. " + "To maintain backward compatibility, it is ignored now. " + "You can set the request value to False to silence this " + "warning, or to True to consume and use the metadata." + ) + + def _route_params(self, params): + """Prepare the given parameters to be passed to the method. + + The output of this method can be used directly as the input to the + corresponding method as extra props. + + Parameters + ---------- + params : dict + A dictionary of provided metadata. + + Returns + ------- + params : Bunch + A :class:`~sklearn.utils.Bunch` of {prop: value} which can be given to the + corresponding method. 
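+
+        A minimal sketch of the expected behaviour (these are internal
+        helpers, shown for illustration only):
+
+            >>> mmr = MethodMetadataRequest(owner="Demo", method="fit")
+            >>> _ = mmr.add_request(param="sample_weight", alias=True)
+            >>> mmr._route_params(params={"sample_weight": [1, 2]})
+            {'sample_weight': [1, 2]}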
+ """ + self._check_warnings(params=params) + unrequested = dict() + args = {arg: value for arg, value in params.items() if value is not None} + res = Bunch() + for prop, alias in self._requests.items(): + if alias is False or alias == WARN: + continue + elif alias is True and prop in args: + res[prop] = args[prop] + elif alias is None and prop in args: + unrequested[prop] = args[prop] + elif alias in args: + res[prop] = args[alias] + if unrequested: + raise UnsetMetadataPassedError( + message=( + f"[{', '.join([key for key in unrequested])}] are passed but are" + " not explicitly set as requested or not for" + f" {self.owner}.{self.method}" + ), + unrequested_params=unrequested, + routed_params=res, + ) + return res + + def _consumes(self, params): + """Check whether the given parameters are consumed by this method. + + Parameters + ---------- + params : iterable of str + An iterable of parameters to check. + + Returns + ------- + consumed : set of str + A set of parameters which are consumed by this method. + """ + params = set(params) + res = set() + for prop, alias in self._requests.items(): + if alias is True and prop in params: + res.add(prop) + elif isinstance(alias, str) and alias in params: + res.add(alias) + return res + + def _serialize(self): + """Serialize the object. + + Returns + ------- + obj : dict + A serialized version of the instance in the form of a dictionary. + """ + return self._requests + + def __repr__(self): + return str(self._serialize()) + + def __str__(self): + return str(repr(self)) + + +class MetadataRequest: + """Contains the metadata request info of a consumer. + + Instances of `MethodMetadataRequest` are used in this class for each + available method under `metadatarequest.{method}`. + + Consumer-only classes such as simple estimators return a serialized + version of this class as the output of `get_metadata_routing()`. + + .. versionadded:: 1.3 + + Parameters + ---------- + owner : str + The name of the object to which these requests belong. + """ + + # this is here for us to use this attribute's value instead of doing + # `isinstance` in our checks, so that we avoid issues when people vendor + # this file instead of using it directly from scikit-learn. + _type = "metadata_request" + + def __init__(self, owner): + self.owner = owner + for method in SIMPLE_METHODS: + setattr( + self, + method, + MethodMetadataRequest(owner=owner, method=method), + ) + + def consumes(self, method, params): + """Check whether the given parameters are consumed by the given method. + + .. versionadded:: 1.4 + + Parameters + ---------- + method : str + The name of the method to check. + + params : iterable of str + An iterable of parameters to check. + + Returns + ------- + consumed : set of str + A set of parameters which are consumed by the given method. + """ + return getattr(self, method)._consumes(params=params) + + def __getattr__(self, name): + # Called when the default attribute access fails with an AttributeError + # (either __getattribute__() raises an AttributeError because name is + # not an instance attribute or an attribute in the class tree for self; + # or __get__() of a name property raises AttributeError). This method + # should either return the (computed) attribute value or raise an + # AttributeError exception. 
+ # https://docs.python.org/3/reference/datamodel.html#object.__getattr__ + if name not in COMPOSITE_METHODS: + raise AttributeError( + f"'{self.__class__.__name__}' object has no attribute '{name}'" + ) + + requests = {} + for method in COMPOSITE_METHODS[name]: + mmr = getattr(self, method) + existing = set(requests.keys()) + upcoming = set(mmr.requests.keys()) + common = existing & upcoming + conflicts = [key for key in common if requests[key] != mmr._requests[key]] + if conflicts: + raise ValueError( + f"Conflicting metadata requests for {', '.join(conflicts)} while" + f" composing the requests for {name}. Metadata with the same name" + f" for methods {', '.join(COMPOSITE_METHODS[name])} should have the" + " same request value." + ) + requests.update(mmr._requests) + return MethodMetadataRequest(owner=self.owner, method=name, requests=requests) + + def _get_param_names(self, method, return_alias, ignore_self_request=None): + """Get names of all metadata that can be consumed or routed by specified \ + method. + + This method returns the names of all metadata, even the ``False`` + ones. + + Parameters + ---------- + method : str + The name of the method for which metadata names are requested. + + return_alias : bool + Controls whether original or aliased names should be returned. If + ``False``, aliases are ignored and original names are returned. + + ignore_self_request : bool + Ignored. Present for API compatibility. + + Returns + ------- + names : set of str + A set of strings with the names of all parameters. + """ + return getattr(self, method)._get_param_names(return_alias=return_alias) + + def _route_params(self, *, method, params): + """Prepare the given parameters to be passed to the method. + + The output of this method can be used directly as the input to the + corresponding method as extra keyword arguments to pass metadata. + + Parameters + ---------- + method : str + The name of the method for which the parameters are requested and + routed. + + params : dict + A dictionary of provided metadata. + + Returns + ------- + params : Bunch + A :class:`~sklearn.utils.Bunch` of {prop: value} which can be given to the + corresponding method. + """ + return getattr(self, method)._route_params(params=params) + + def _check_warnings(self, *, method, params): + """Check whether metadata is passed which is marked as WARN. + + If any metadata is passed which is marked as WARN, a warning is raised. + + Parameters + ---------- + method : str + The name of the method for which the warnings should be checked. + + params : dict + The metadata passed to a method. + """ + getattr(self, method)._check_warnings(params=params) + + def _serialize(self): + """Serialize the object. + + Returns + ------- + obj : dict + A serialized version of the instance in the form of a dictionary. + """ + output = dict() + for method in SIMPLE_METHODS: + mmr = getattr(self, method) + if len(mmr.requests): + output[method] = mmr._serialize() + return output + + def __repr__(self): + return str(self._serialize()) + + def __str__(self): + return str(repr(self)) + + +# Metadata Request for Routers +# ============================ +# This section includes all objects required for MetadataRouter which is used +# in routers, returned by their ``get_metadata_routing``. + +# This namedtuple is used to store a (mapping, routing) pair. Mapping is a +# MethodMapping object, and routing is the output of `get_metadata_routing`. +# MetadataRouter stores a collection of these namedtuples. 
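+#
+# For illustration, a router whose ``fit`` routes metadata to a sub-estimator's
+# ``fit`` would conceptually store an entry such as
+# ``RouterMappingPair(mapping=[MethodPair(callee="fit", caller="fit")],
+# router=<the sub-estimator's MetadataRequest>)``.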
+RouterMappingPair = namedtuple("RouterMappingPair", ["mapping", "router"]) + +# A namedtuple storing a single method route. A collection of these namedtuples +# is stored in a MetadataRouter. +MethodPair = namedtuple("MethodPair", ["callee", "caller"]) + + +class MethodMapping: + """Stores the mapping between callee and caller methods for a router. + + This class is primarily used in a ``get_metadata_routing()`` of a router + object when defining the mapping between a sub-object (a sub-estimator or a + scorer) to the router's methods. It stores a collection of ``Route`` + namedtuples. + + Iterating through an instance of this class will yield named + ``MethodPair(callee, caller)`` tuples. + + .. versionadded:: 1.3 + """ + + def __init__(self): + self._routes = [] + + def __iter__(self): + return iter(self._routes) + + def add(self, *, callee, caller): + """Add a method mapping. + + Parameters + ---------- + callee : str + Child object's method name. This method is called in ``caller``. + + caller : str + Parent estimator's method name in which the ``callee`` is called. + + Returns + ------- + self : MethodMapping + Returns self. + """ + if callee not in METHODS: + raise ValueError( + f"Given callee:{callee} is not a valid method. Valid methods are:" + f" {METHODS}" + ) + if caller not in METHODS: + raise ValueError( + f"Given caller:{caller} is not a valid method. Valid methods are:" + f" {METHODS}" + ) + self._routes.append(MethodPair(callee=callee, caller=caller)) + return self + + def _serialize(self): + """Serialize the object. + + Returns + ------- + obj : list + A serialized version of the instance in the form of a list. + """ + result = list() + for route in self._routes: + result.append({"callee": route.callee, "caller": route.caller}) + return result + + @classmethod + def from_str(cls, route): + """Construct an instance from a string. + + Parameters + ---------- + route : str + A string representing the mapping, it can be: + + - `"one-to-one"`: a one to one mapping for all methods. + - `"method"`: the name of a single method, such as ``fit``, + ``transform``, ``score``, etc. + + Returns + ------- + obj : MethodMapping + A :class:`~sklearn.utils.metadata_routing.MethodMapping` instance + constructed from the given string. + """ + routing = cls() + if route == "one-to-one": + for method in METHODS: + routing.add(callee=method, caller=method) + elif route in METHODS: + routing.add(callee=route, caller=route) + else: + raise ValueError("route should be 'one-to-one' or a single method!") + return routing + + def __repr__(self): + return str(self._serialize()) + + def __str__(self): + return str(repr(self)) + + +class MetadataRouter: + """Stores and handles metadata routing for a router object. + + This class is used by router objects to store and handle metadata routing. + Routing information is stored as a dictionary of the form ``{"object_name": + RouteMappingPair(method_mapping, routing_info)}``, where ``method_mapping`` + is an instance of :class:`~sklearn.utils.metadata_routing.MethodMapping` and + ``routing_info`` is either a + :class:`~sklearn.utils.metadata_routing.MetadataRequest` or a + :class:`~sklearn.utils.metadata_routing.MetadataRouter` instance. + + .. versionadded:: 1.3 + + Parameters + ---------- + owner : str + The name of the object to which these requests belong. 
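+
+    A minimal sketch of how a router is typically built inside a
+    ``get_metadata_routing`` implementation (``estimator`` is an arbitrary
+    name and ``LogisticRegression`` is used purely as an example
+    sub-estimator):
+
+        >>> from sklearn.linear_model import LogisticRegression
+        >>> router = MetadataRouter(owner="MyMetaEstimator").add(
+        ...     estimator=LogisticRegression(), method_mapping="fit"
+        ... )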
+ """ + + # this is here for us to use this attribute's value instead of doing + # `isinstance`` in our checks, so that we avoid issues when people vendor + # this file instead of using it directly from scikit-learn. + _type = "metadata_router" + + def __init__(self, owner): + self._route_mappings = dict() + # `_self_request` is used if the router is also a consumer. + # _self_request, (added using `add_self_request()`) is treated + # differently from the other objects which are stored in + # _route_mappings. + self._self_request = None + self.owner = owner + + def add_self_request(self, obj): + """Add `self` (as a consumer) to the routing. + + This method is used if the router is also a consumer, and hence the + router itself needs to be included in the routing. The passed object + can be an estimator or a + :class:`~sklearn.utils.metadata_routing.MetadataRequest`. + + A router should add itself using this method instead of `add` since it + should be treated differently than the other objects to which metadata + is routed by the router. + + Parameters + ---------- + obj : object + This is typically the router instance, i.e. `self` in a + ``get_metadata_routing()`` implementation. It can also be a + ``MetadataRequest`` instance. + + Returns + ------- + self : MetadataRouter + Returns `self`. + """ + if getattr(obj, "_type", None) == "metadata_request": + self._self_request = deepcopy(obj) + elif hasattr(obj, "_get_metadata_request"): + self._self_request = deepcopy(obj._get_metadata_request()) + else: + raise ValueError( + "Given `obj` is neither a `MetadataRequest` nor does it implement the" + " required API. Inheriting from `BaseEstimator` implements the required" + " API." + ) + return self + + def add(self, *, method_mapping, **objs): + """Add named objects with their corresponding method mapping. + + Parameters + ---------- + method_mapping : MethodMapping or str + The mapping between the child and the parent's methods. If str, the + output of :func:`~sklearn.utils.metadata_routing.MethodMapping.from_str` + is used. + + **objs : dict + A dictionary of objects from which metadata is extracted by calling + :func:`~sklearn.utils.metadata_routing.get_routing_for_object` on them. + + Returns + ------- + self : MetadataRouter + Returns `self`. + """ + if isinstance(method_mapping, str): + method_mapping = MethodMapping.from_str(method_mapping) + else: + method_mapping = deepcopy(method_mapping) + + for name, obj in objs.items(): + self._route_mappings[name] = RouterMappingPair( + mapping=method_mapping, router=get_routing_for_object(obj) + ) + return self + + def consumes(self, method, params): + """Check whether the given parameters are consumed by the given method. + + .. versionadded:: 1.4 + + Parameters + ---------- + method : str + The name of the method to check. + + params : iterable of str + An iterable of parameters to check. + + Returns + ------- + consumed : set of str + A set of parameters which are consumed by the given method. + """ + res = set() + if self._self_request: + res = res | self._self_request.consumes(method=method, params=params) + + for _, route_mapping in self._route_mappings.items(): + for callee, caller in route_mapping.mapping: + if caller == method: + res = res | route_mapping.router.consumes( + method=callee, params=params + ) + + return res + + def _get_param_names(self, *, method, return_alias, ignore_self_request): + """Get names of all metadata that can be consumed or routed by specified \ + method. 
+ + This method returns the names of all metadata, even the ``False`` + ones. + + Parameters + ---------- + method : str + The name of the method for which metadata names are requested. + + return_alias : bool + Controls whether original or aliased names should be returned, + which only applies to the stored `self`. If no `self` routing + object is stored, this parameter has no effect. + + ignore_self_request : bool + If `self._self_request` should be ignored. This is used in `_route_params`. + If ``True``, ``return_alias`` has no effect. + + Returns + ------- + names : set of str + A set of strings with the names of all parameters. + """ + res = set() + if self._self_request and not ignore_self_request: + res = res.union( + self._self_request._get_param_names( + method=method, return_alias=return_alias + ) + ) + + for name, route_mapping in self._route_mappings.items(): + for callee, caller in route_mapping.mapping: + if caller == method: + res = res.union( + route_mapping.router._get_param_names( + method=callee, return_alias=True, ignore_self_request=False + ) + ) + return res + + def _route_params(self, *, params, method): + """Prepare the given parameters to be passed to the method. + + This is used when a router is used as a child object of another router. + The parent router then passes all parameters understood by the child + object to it and delegates their validation to the child. + + The output of this method can be used directly as the input to the + corresponding method as extra props. + + Parameters + ---------- + method : str + The name of the method for which the parameters are requested and + routed. + + params : dict + A dictionary of provided metadata. + + Returns + ------- + params : Bunch + A :class:`~sklearn.utils.Bunch` of {prop: value} which can be given to the + corresponding method. + """ + res = Bunch() + if self._self_request: + res.update(self._self_request._route_params(params=params, method=method)) + + param_names = self._get_param_names( + method=method, return_alias=True, ignore_self_request=True + ) + child_params = { + key: value for key, value in params.items() if key in param_names + } + for key in set(res.keys()).intersection(child_params.keys()): + # conflicts are okay if the passed objects are the same, but it's + # an issue if they're different objects. + if child_params[key] is not res[key]: + raise ValueError( + f"In {self.owner}, there is a conflict on {key} between what is" + " requested for this estimator and what is requested by its" + " children. You can resolve this conflict by using an alias for" + " the child estimator(s) requested metadata." + ) + + res.update(child_params) + return res + + def route_params(self, *, caller, params): + """Return the input parameters requested by child objects. + + The output of this method is a bunch, which includes the inputs for all + methods of each child object that are used in the router's `caller` + method. + + If the router is also a consumer, it also checks for warnings of + `self`'s/consumer's requested metadata. + + Parameters + ---------- + caller : str + The name of the method for which the parameters are requested and + routed. If called inside the :term:`fit` method of a router, it + would be `"fit"`. + + params : dict + A dictionary of provided metadata. 
+ + Returns + ------- + params : Bunch + A :class:`~sklearn.utils.Bunch` of the form + ``{"object_name": {"method_name": {prop: value}}}`` which can be + used to pass the required metadata to corresponding methods or + corresponding child objects. + """ + if self._self_request: + self._self_request._check_warnings(params=params, method=caller) + + res = Bunch() + for name, route_mapping in self._route_mappings.items(): + router, mapping = route_mapping.router, route_mapping.mapping + + res[name] = Bunch() + for _callee, _caller in mapping: + if _caller == caller: + res[name][_callee] = router._route_params( + params=params, method=_callee + ) + return res + + def validate_metadata(self, *, method, params): + """Validate given metadata for a method. + + This raises a ``TypeError`` if some of the passed metadata are not + understood by child objects. + + Parameters + ---------- + method : str + The name of the method for which the parameters are requested and + routed. If called inside the :term:`fit` method of a router, it + would be `"fit"`. + + params : dict + A dictionary of provided metadata. + """ + param_names = self._get_param_names( + method=method, return_alias=False, ignore_self_request=False + ) + if self._self_request: + self_params = self._self_request._get_param_names( + method=method, return_alias=False + ) + else: + self_params = set() + extra_keys = set(params.keys()) - param_names - self_params + if extra_keys: + raise TypeError( + f"{self.owner}.{method} got unexpected argument(s) {extra_keys}, which" + " are not requested metadata in any object." + ) + + def _serialize(self): + """Serialize the object. + + Returns + ------- + obj : dict + A serialized version of the instance in the form of a dictionary. + """ + res = dict() + if self._self_request: + res["$self_request"] = self._self_request._serialize() + for name, route_mapping in self._route_mappings.items(): + res[name] = dict() + res[name]["mapping"] = route_mapping.mapping._serialize() + res[name]["router"] = route_mapping.router._serialize() + + return res + + def __iter__(self): + if self._self_request: + yield ( + "$self_request", + RouterMappingPair( + mapping=MethodMapping.from_str("one-to-one"), + router=self._self_request, + ), + ) + for name, route_mapping in self._route_mappings.items(): + yield (name, route_mapping) + + def __repr__(self): + return str(self._serialize()) + + def __str__(self): + return str(repr(self)) + + +def get_routing_for_object(obj=None): + """Get a ``Metadata{Router, Request}`` instance from the given object. + + This function returns a + :class:`~sklearn.utils.metadata_routing.MetadataRouter` or a + :class:`~sklearn.utils.metadata_routing.MetadataRequest` from the given input. + + This function always returns a copy or an instance constructed from the + input, such that changing the output of this function will not change the + original object. + + .. versionadded:: 1.3 + + Parameters + ---------- + obj : object + - If the object is already a + :class:`~sklearn.utils.metadata_routing.MetadataRequest` or a + :class:`~sklearn.utils.metadata_routing.MetadataRouter`, return a copy + of that. + - If the object provides a `get_metadata_routing` method, return a copy + of the output of that method. + - Returns an empty :class:`~sklearn.utils.metadata_routing.MetadataRequest` + otherwise. + + Returns + ------- + obj : MetadataRequest or MetadataRouting + A ``MetadataRequest`` or a ``MetadataRouting`` taken or created from + the given object. 
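# Illustrative sketch (not part of the patch): a minimal meta-estimator wiring
# together the classes above. ``MyMetaEstimator`` and its attributes are
# assumptions for the example, not scikit-learn classes; the imports use the
# public ``sklearn.utils.metadata_routing`` re-exports of this module.
from sklearn.base import BaseEstimator, MetaEstimatorMixin, clone
from sklearn.utils.metadata_routing import MetadataRouter, MethodMapping


class MyMetaEstimator(MetaEstimatorMixin, BaseEstimator):
    def __init__(self, estimator):
        self.estimator = estimator

    def get_metadata_routing(self):
        # Route whatever the sub-estimator's ``fit`` requests from our ``fit``.
        return MetadataRouter(owner=self.__class__.__name__).add(
            estimator=self.estimator,
            method_mapping=MethodMapping().add(callee="fit", caller="fit"),
        )

    def fit(self, X, y, **fit_params):
        routing = self.get_metadata_routing()
        # Reject metadata that no child object requested ...
        routing.validate_metadata(method="fit", params=fit_params)
        # ... and split the rest per child object and method.
        routed_params = routing.route_params(caller="fit", params=fit_params)
        self.estimator_ = clone(self.estimator).fit(
            X, y, **routed_params.estimator.fit
        )
        return self


# The ``process_routing`` helper defined later in this module bundles the
# validate/route steps above into a single call. For metadata such as
# ``sample_weight`` to actually reach the sub-estimator, the sub-estimator must
# request it, e.g. ``inner.set_fit_request(sample_weight=True)`` with
# ``sklearn.set_config(enable_metadata_routing=True)``.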
+ """ + # doing this instead of a try/except since an AttributeError could be raised + # for other reasons. + if hasattr(obj, "get_metadata_routing"): + return deepcopy(obj.get_metadata_routing()) + + elif getattr(obj, "_type", None) in ["metadata_request", "metadata_router"]: + return deepcopy(obj) + + return MetadataRequest(owner=None) + + +# Request method +# ============== +# This section includes what's needed for the request method descriptor and +# their dynamic generation in a meta class. + +# These strings are used to dynamically generate the docstrings for +# set_{method}_request methods. +REQUESTER_DOC = """ Request metadata passed to the ``{method}`` method. + + Note that this method is only relevant if + ``enable_metadata_routing=True`` (see :func:`sklearn.set_config`). + Please see :ref:`User Guide ` on how the routing + mechanism works. + + The options for each parameter are: + + - ``True``: metadata is requested, and \ +passed to ``{method}`` if provided. The request is ignored if \ +metadata is not provided. + + - ``False``: metadata is not requested and the meta-estimator \ +will not pass it to ``{method}``. + + - ``None``: metadata is not requested, and the meta-estimator \ +will raise an error if the user provides it. + + - ``str``: metadata should be passed to the meta-estimator with \ +this given alias instead of the original name. + + The default (``sklearn.utils.metadata_routing.UNCHANGED``) retains the + existing request. This allows you to change the request for some + parameters and not others. + + .. versionadded:: 1.3 + + .. note:: + This method is only relevant if this estimator is used as a + sub-estimator of a meta-estimator, e.g. used inside a + :class:`~sklearn.pipeline.Pipeline`. Otherwise it has no effect. + + Parameters + ---------- +""" +REQUESTER_DOC_PARAM = """ {metadata} : str, True, False, or None, \ + default=sklearn.utils.metadata_routing.UNCHANGED + Metadata routing for ``{metadata}`` parameter in ``{method}``. + +""" +REQUESTER_DOC_RETURN = """ Returns + ------- + self : object + The updated object. +""" + + +class RequestMethod: + """ + A descriptor for request methods. + + .. versionadded:: 1.3 + + Parameters + ---------- + name : str + The name of the method for which the request function should be + created, e.g. ``"fit"`` would create a ``set_fit_request`` function. + + keys : list of str + A list of strings which are accepted parameters by the created + function, e.g. ``["sample_weight"]`` if the corresponding method + accepts it as a metadata. + + validate_keys : bool, default=True + Whether to check if the requested parameters fit the actual parameters + of the method. + + Notes + ----- + This class is a descriptor [1]_ and uses PEP-362 to set the signature of + the returned function [2]_. + + References + ---------- + .. [1] https://docs.python.org/3/howto/descriptor.html + + .. [2] https://www.python.org/dev/peps/pep-0362/ + """ + + def __init__(self, name, keys, validate_keys=True): + self.name = name + self.keys = keys + self.validate_keys = validate_keys + + def __get__(self, instance, owner): + # we would want to have a method which accepts only the expected args + def func(**kw): + """Updates the request for provided parameters + + This docstring is overwritten below. + See REQUESTER_DOC for expected functionality + """ + if not _routing_enabled(): + raise RuntimeError( + "This method is only available when metadata routing is enabled." + " You can enable it using" + " sklearn.set_config(enable_metadata_routing=True)." 
+ ) + + if self.validate_keys and (set(kw) - set(self.keys)): + raise TypeError( + f"Unexpected args: {set(kw) - set(self.keys)}. Accepted arguments" + f" are: {set(self.keys)}" + ) + + requests = instance._get_metadata_request() + method_metadata_request = getattr(requests, self.name) + + for prop, alias in kw.items(): + if alias is not UNCHANGED: + method_metadata_request.add_request(param=prop, alias=alias) + instance._metadata_request = requests + + return instance + + # Now we set the relevant attributes of the function so that it seems + # like a normal method to the end user, with known expected arguments. + func.__name__ = f"set_{self.name}_request" + params = [ + inspect.Parameter( + name="self", + kind=inspect.Parameter.POSITIONAL_OR_KEYWORD, + annotation=owner, + ) + ] + params.extend( + [ + inspect.Parameter( + k, + inspect.Parameter.KEYWORD_ONLY, + default=UNCHANGED, + annotation=Optional[Union[bool, None, str]], + ) + for k in self.keys + ] + ) + func.__signature__ = inspect.Signature( + params, + return_annotation=owner, + ) + doc = REQUESTER_DOC.format(method=self.name) + for metadata in self.keys: + doc += REQUESTER_DOC_PARAM.format(metadata=metadata, method=self.name) + doc += REQUESTER_DOC_RETURN + func.__doc__ = doc + return func + + +class _MetadataRequester: + """Mixin class for adding metadata request functionality. + + ``BaseEstimator`` inherits from this Mixin. + + .. versionadded:: 1.3 + """ + + if TYPE_CHECKING: # pragma: no cover + # This code is never run in runtime, but it's here for type checking. + # Type checkers fail to understand that the `set_{method}_request` + # methods are dynamically generated, and they complain that they are + # not defined. We define them here to make type checkers happy. + # During type checking analyzers assume this to be True. + # The following list of defined methods mirrors the list of methods + # in SIMPLE_METHODS. + # fmt: off + def set_fit_request(self, **kwargs): pass + def set_partial_fit_request(self, **kwargs): pass + def set_predict_request(self, **kwargs): pass + def set_predict_proba_request(self, **kwargs): pass + def set_predict_log_proba_request(self, **kwargs): pass + def set_decision_function_request(self, **kwargs): pass + def set_score_request(self, **kwargs): pass + def set_split_request(self, **kwargs): pass + def set_transform_request(self, **kwargs): pass + def set_inverse_transform_request(self, **kwargs): pass + # fmt: on + + def __init_subclass__(cls, **kwargs): + """Set the ``set_{method}_request`` methods. + + This uses PEP-487 [1]_ to set the ``set_{method}_request`` methods. It + looks for the information available in the set default values which are + set using ``__metadata_request__*`` class attributes, or inferred + from method signatures. + + The ``__metadata_request__*`` class attributes are used when a method + does not explicitly accept a metadata through its arguments or if the + developer would like to specify a request value for those metadata + which are different from the default ``None``. + + References + ---------- + .. [1] https://www.python.org/dev/peps/pep-0487 + """ + try: + requests = cls._get_default_requests() + except Exception: + # if there are any issues in the default values, it will be raised + # when ``get_metadata_routing`` is called. Here we are going to + # ignore all the issues such as bad defaults etc. 
+ super().__init_subclass__(**kwargs) + return + + for method in SIMPLE_METHODS: + mmr = getattr(requests, method) + # set ``set_{method}_request``` methods + if not len(mmr.requests): + continue + setattr( + cls, + f"set_{method}_request", + RequestMethod(method, sorted(mmr.requests.keys())), + ) + super().__init_subclass__(**kwargs) + + @classmethod + def _build_request_for_signature(cls, router, method): + """Build the `MethodMetadataRequest` for a method using its signature. + + This method takes all arguments from the method signature and uses + ``None`` as their default request value, except ``X``, ``y``, ``Y``, + ``Xt``, ``yt``, ``*args``, and ``**kwargs``. + + Parameters + ---------- + router : MetadataRequest + The parent object for the created `MethodMetadataRequest`. + method : str + The name of the method. + + Returns + ------- + method_request : MethodMetadataRequest + The prepared request using the method's signature. + """ + mmr = MethodMetadataRequest(owner=cls.__name__, method=method) + # Here we use `isfunction` instead of `ismethod` because calling `getattr` + # on a class instead of an instance returns an unbound function. + if not hasattr(cls, method) or not inspect.isfunction(getattr(cls, method)): + return mmr + # ignore the first parameter of the method, which is usually "self" + params = list(inspect.signature(getattr(cls, method)).parameters.items())[1:] + for pname, param in params: + if pname in {"X", "y", "Y", "Xt", "yt"}: + continue + if param.kind in {param.VAR_POSITIONAL, param.VAR_KEYWORD}: + continue + mmr.add_request( + param=pname, + alias=None, + ) + return mmr + + @classmethod + def _get_default_requests(cls): + """Collect default request values. + + This method combines the information present in ``__metadata_request__*`` + class attributes, as well as determining request keys from method + signatures. + """ + requests = MetadataRequest(owner=cls.__name__) + + for method in SIMPLE_METHODS: + setattr( + requests, + method, + cls._build_request_for_signature(router=requests, method=method), + ) + + # Then overwrite those defaults with the ones provided in + # __metadata_request__* attributes. Defaults set in + # __metadata_request__* attributes take precedence over signature + # sniffing. + + # need to go through the MRO since this is a class attribute and + # ``vars`` doesn't report the parent class attributes. We go through + # the reverse of the MRO so that child classes have precedence over + # their parents. + defaults = dict() + for base_class in reversed(inspect.getmro(cls)): + base_defaults = { + attr: value + for attr, value in vars(base_class).items() + if "__metadata_request__" in attr + } + defaults.update(base_defaults) + defaults = dict(sorted(defaults.items())) + + for attr, value in defaults.items(): + # we don't check for attr.startswith() since python prefixes attrs + # starting with __ with the `_ClassName`. + substr = "__metadata_request__" + method = attr[attr.index(substr) + len(substr) :] + for prop, alias in value.items(): + getattr(requests, method).add_request(param=prop, alias=alias) + + return requests + + def _get_metadata_request(self): + """Get requested data properties. + + Please check :ref:`User Guide ` on how the routing + mechanism works. + + Returns + ------- + request : MetadataRequest + A :class:`~sklearn.utils.metadata_routing.MetadataRequest` instance. 
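# Illustrative sketch (not part of the patch): what the dynamically generated
# ``set_{method}_request`` methods look like from the user's side. The
# estimator is a real scikit-learn class; routing has to be enabled for the
# call to succeed.
import sklearn
from sklearn.linear_model import LogisticRegression

with sklearn.config_context(enable_metadata_routing=True):
    est = LogisticRegression()
    # ``set_fit_request`` exists because ``fit`` accepts ``sample_weight``; the
    # ``RequestMethod`` descriptor records the request and returns ``est``.
    est.set_fit_request(sample_weight=True)
    # The request now shows up in the estimator's routing information.
    print(est.get_metadata_routing())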
+ """ + if hasattr(self, "_metadata_request"): + requests = get_routing_for_object(self._metadata_request) + else: + requests = self._get_default_requests() + + return requests + + def get_metadata_routing(self): + """Get metadata routing of this object. + + Please check :ref:`User Guide ` on how the routing + mechanism works. + + Returns + ------- + routing : MetadataRequest + A :class:`~sklearn.utils.metadata_routing.MetadataRequest` encapsulating + routing information. + """ + return self._get_metadata_request() + + +# Process Routing in Routers +# ========================== +# This is almost always the only method used in routers to process and route +# given metadata. This is to minimize the boilerplate required in routers. + + +# Here the first two arguments are positional only which makes everything +# passed as keyword argument a metadata. The first two args also have an `_` +# prefix to reduce the chances of name collisions with the passed metadata, and +# since they're positional only, users will never type those underscores. +def process_routing(_obj, _method, /, **kwargs): + """Validate and route input parameters. + + This function is used inside a router's method, e.g. :term:`fit`, + to validate the metadata and handle the routing. + + Assuming this signature: ``fit(self, X, y, sample_weight=None, **fit_params)``, + a call to this function would be: + ``process_routing(self, sample_weight=sample_weight, **fit_params)``. + + Note that if routing is not enabled and ``kwargs`` is empty, then it + returns an empty routing where ``process_routing(...).ANYTHING.ANY_METHOD`` + is always an empty dictionary. + + .. versionadded:: 1.3 + + Parameters + ---------- + _obj : object + An object implementing ``get_metadata_routing``. Typically a + meta-estimator. + + _method : str + The name of the router's method in which this function is called. + + **kwargs : dict + Metadata to be routed. + + Returns + ------- + routed_params : Bunch + A :class:`~sklearn.utils.Bunch` of the form ``{"object_name": {"method_name": + {prop: value}}}`` which can be used to pass the required metadata to + corresponding methods or corresponding child objects. The object names + are those defined in `obj.get_metadata_routing()`. + """ + if not kwargs: + # If routing is not enabled and kwargs are empty, then we don't have to + # try doing any routing, we can simply return a structure which returns + # an empty dict on routed_params.ANYTHING.ANY_METHOD. + class EmptyRequest: + def get(self, name, default=None): + return Bunch(**{method: dict() for method in METHODS}) + + def __getitem__(self, name): + return Bunch(**{method: dict() for method in METHODS}) + + def __getattr__(self, name): + return Bunch(**{method: dict() for method in METHODS}) + + return EmptyRequest() + + if not (hasattr(_obj, "get_metadata_routing") or isinstance(_obj, MetadataRouter)): + raise AttributeError( + f"The given object ({repr(_obj.__class__.__name__)}) needs to either" + " implement the routing method `get_metadata_routing` or be a" + " `MetadataRouter` instance." + ) + if _method not in METHODS: + raise TypeError( + f"Can only route and process input on these methods: {METHODS}, " + f"while the passed method is: {_method}." 
+ ) + + request_routing = get_routing_for_object(_obj) + request_routing.validate_metadata(params=kwargs, method=_method) + routed_params = request_routing.route_params(params=kwargs, caller=_method) + + return routed_params diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_openmp_helpers.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_openmp_helpers.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..ca49165d6512da35769bc0e3d331453a5462cc8e Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_openmp_helpers.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_openmp_helpers.pxd b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_openmp_helpers.pxd new file mode 100644 index 0000000000000000000000000000000000000000..a7694d0be2d93b77fc89e7c8eb8d15338fe3ebb4 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_openmp_helpers.pxd @@ -0,0 +1,33 @@ +# Helpers to safely access OpenMP routines +# +# no-op implementations are provided for the case where OpenMP is not available. +# +# All calls to OpenMP routines should be cimported from this module. + +cdef extern from *: + """ + #ifdef _OPENMP + #include + #define SKLEARN_OPENMP_PARALLELISM_ENABLED 1 + #else + #define SKLEARN_OPENMP_PARALLELISM_ENABLED 0 + #define omp_lock_t int + #define omp_init_lock(l) (void)0 + #define omp_destroy_lock(l) (void)0 + #define omp_set_lock(l) (void)0 + #define omp_unset_lock(l) (void)0 + #define omp_get_thread_num() 0 + #define omp_get_max_threads() 1 + #endif + """ + bint SKLEARN_OPENMP_PARALLELISM_ENABLED + + ctypedef struct omp_lock_t: + pass + + void omp_init_lock(omp_lock_t*) noexcept nogil + void omp_destroy_lock(omp_lock_t*) noexcept nogil + void omp_set_lock(omp_lock_t*) noexcept nogil + void omp_unset_lock(omp_lock_t*) noexcept nogil + int omp_get_thread_num() noexcept nogil + int omp_get_max_threads() noexcept nogil diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_param_validation.py b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_param_validation.py new file mode 100644 index 0000000000000000000000000000000000000000..ae2e9648a4ccb195fd3e14bede1359e161d30846 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_param_validation.py @@ -0,0 +1,905 @@ +import functools +import math +import operator +import re +from abc import ABC, abstractmethod +from collections.abc import Iterable +from inspect import signature +from numbers import Integral, Real + +import numpy as np +from scipy.sparse import csr_matrix, issparse + +from .._config import config_context, get_config +from .validation import _is_arraylike_not_scalar + + +class InvalidParameterError(ValueError, TypeError): + """Custom exception to be raised when the parameter of a class/method/function + does not have a valid type or value. + """ + + # Inherits from ValueError and TypeError to keep backward compatibility. + + +def validate_parameter_constraints(parameter_constraints, params, caller_name): + """Validate types and values of given parameters. + + Parameters + ---------- + parameter_constraints : dict or {"no_validation"} + If "no_validation", validation is skipped for this parameter. + + If a dict, it must be a dictionary `param_name: list of constraints`. + A parameter is valid if it satisfies one of the constraints from the list. 
+ Constraints can be: + - an Interval object, representing a continuous or discrete range of numbers + - the string "array-like" + - the string "sparse matrix" + - the string "random_state" + - callable + - None, meaning that None is a valid value for the parameter + - any type, meaning that any instance of this type is valid + - an Options object, representing a set of elements of a given type + - a StrOptions object, representing a set of strings + - the string "boolean" + - the string "verbose" + - the string "cv_object" + - the string "nan" + - a MissingValues object representing markers for missing values + - a HasMethods object, representing method(s) an object must have + - a Hidden object, representing a constraint not meant to be exposed to the user + + params : dict + A dictionary `param_name: param_value`. The parameters to validate against the + constraints. + + caller_name : str + The name of the estimator or function or method that called this function. + """ + for param_name, param_val in params.items(): + # We allow parameters to not have a constraint so that third party estimators + # can inherit from sklearn estimators without having to necessarily use the + # validation tools. + if param_name not in parameter_constraints: + continue + + constraints = parameter_constraints[param_name] + + if constraints == "no_validation": + continue + + constraints = [make_constraint(constraint) for constraint in constraints] + + for constraint in constraints: + if constraint.is_satisfied_by(param_val): + # this constraint is satisfied, no need to check further. + break + else: + # No constraint is satisfied, raise with an informative message. + + # Ignore constraints that we don't want to expose in the error message, + # i.e. options that are for internal purpose or not officially supported. + constraints = [ + constraint for constraint in constraints if not constraint.hidden + ] + + if len(constraints) == 1: + constraints_str = f"{constraints[0]}" + else: + constraints_str = ( + f"{', '.join([str(c) for c in constraints[:-1]])} or" + f" {constraints[-1]}" + ) + + raise InvalidParameterError( + f"The {param_name!r} parameter of {caller_name} must be" + f" {constraints_str}. Got {param_val!r} instead." + ) + + +def make_constraint(constraint): + """Convert the constraint into the appropriate Constraint object. + + Parameters + ---------- + constraint : object + The constraint to convert. + + Returns + ------- + constraint : instance of _Constraint + The converted constraint. 
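# Illustrative sketch (not part of the patch): calling the validator directly
# with a small constraint table. The parameter names and the caller name are
# made up for the example; the imports target the private module defined in
# this patch, so they may change without deprecation.
from numbers import Integral

from sklearn.utils._param_validation import (
    Interval,
    InvalidParameterError,
    StrOptions,
    validate_parameter_constraints,
)

constraints = {
    "n_components": [Interval(Integral, 1, None, closed="left")],
    "solver": [StrOptions({"svd", "eigen"})],
}

# Every value satisfies one constraint from its list: no error is raised.
validate_parameter_constraints(
    constraints, {"n_components": 2, "solver": "svd"}, caller_name="MyEstimator"
)

try:
    # 0 is outside [1, inf), so an informative error is raised.
    validate_parameter_constraints(
        constraints, {"n_components": 0, "solver": "svd"}, caller_name="MyEstimator"
    )
except InvalidParameterError as exc:
    print(exc)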
+ """ + if isinstance(constraint, str) and constraint == "array-like": + return _ArrayLikes() + if isinstance(constraint, str) and constraint == "sparse matrix": + return _SparseMatrices() + if isinstance(constraint, str) and constraint == "random_state": + return _RandomStates() + if constraint is callable: + return _Callables() + if constraint is None: + return _NoneConstraint() + if isinstance(constraint, type): + return _InstancesOf(constraint) + if isinstance( + constraint, (Interval, StrOptions, Options, HasMethods, MissingValues) + ): + return constraint + if isinstance(constraint, str) and constraint == "boolean": + return _Booleans() + if isinstance(constraint, str) and constraint == "verbose": + return _VerboseHelper() + if isinstance(constraint, str) and constraint == "cv_object": + return _CVObjects() + if isinstance(constraint, Hidden): + constraint = make_constraint(constraint.constraint) + constraint.hidden = True + return constraint + if isinstance(constraint, str) and constraint == "nan": + return _NanConstraint() + raise ValueError(f"Unknown constraint type: {constraint}") + + +def validate_params(parameter_constraints, *, prefer_skip_nested_validation): + """Decorator to validate types and values of functions and methods. + + Parameters + ---------- + parameter_constraints : dict + A dictionary `param_name: list of constraints`. See the docstring of + `validate_parameter_constraints` for a description of the accepted constraints. + + Note that the *args and **kwargs parameters are not validated and must not be + present in the parameter_constraints dictionary. + + prefer_skip_nested_validation : bool + If True, the validation of parameters of inner estimators or functions + called by the decorated function will be skipped. + + This is useful to avoid validating many times the parameters passed by the + user from the public facing API. It's also useful to avoid validating + parameters that we pass internally to inner functions that are guaranteed to + be valid by the test suite. + + It should be set to True for most functions, except for those that receive + non-validated objects as parameters or that are just wrappers around classes + because they only perform a partial validation. + + Returns + ------- + decorated_function : function or method + The decorated function. + """ + + def decorator(func): + # The dict of parameter constraints is set as an attribute of the function + # to make it possible to dynamically introspect the constraints for + # automatic testing. 
+ setattr(func, "_skl_parameter_constraints", parameter_constraints) + + @functools.wraps(func) + def wrapper(*args, **kwargs): + global_skip_validation = get_config()["skip_parameter_validation"] + if global_skip_validation: + return func(*args, **kwargs) + + func_sig = signature(func) + + # Map *args/**kwargs to the function signature + params = func_sig.bind(*args, **kwargs) + params.apply_defaults() + + # ignore self/cls and positional/keyword markers + to_ignore = [ + p.name + for p in func_sig.parameters.values() + if p.kind in (p.VAR_POSITIONAL, p.VAR_KEYWORD) + ] + to_ignore += ["self", "cls"] + params = {k: v for k, v in params.arguments.items() if k not in to_ignore} + + validate_parameter_constraints( + parameter_constraints, params, caller_name=func.__qualname__ + ) + + try: + with config_context( + skip_parameter_validation=( + prefer_skip_nested_validation or global_skip_validation + ) + ): + return func(*args, **kwargs) + except InvalidParameterError as e: + # When the function is just a wrapper around an estimator, we allow + # the function to delegate validation to the estimator, but we replace + # the name of the estimator by the name of the function in the error + # message to avoid confusion. + msg = re.sub( + r"parameter of \w+ must be", + f"parameter of {func.__qualname__} must be", + str(e), + ) + raise InvalidParameterError(msg) from e + + return wrapper + + return decorator + + +class RealNotInt(Real): + """A type that represents reals that are not instances of int. + + Behaves like float, but also works with values extracted from numpy arrays. + isintance(1, RealNotInt) -> False + isinstance(1.0, RealNotInt) -> True + """ + + +RealNotInt.register(float) + + +def _type_name(t): + """Convert type into human readable string.""" + module = t.__module__ + qualname = t.__qualname__ + if module == "builtins": + return qualname + elif t == Real: + return "float" + elif t == Integral: + return "int" + return f"{module}.{qualname}" + + +class _Constraint(ABC): + """Base class for the constraint objects.""" + + def __init__(self): + self.hidden = False + + @abstractmethod + def is_satisfied_by(self, val): + """Whether or not a value satisfies the constraint. + + Parameters + ---------- + val : object + The value to check. + + Returns + ------- + is_satisfied : bool + Whether or not the constraint is satisfied by this value. + """ + + @abstractmethod + def __str__(self): + """A human readable representational string of the constraint.""" + + +class _InstancesOf(_Constraint): + """Constraint representing instances of a given type. + + Parameters + ---------- + type : type + The valid type. 
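# Illustrative sketch (not part of the patch): the decorator form of the same
# validation. ``scale_by_ratio`` is a hypothetical helper, not a scikit-learn
# function.
from numbers import Real

from sklearn.utils._param_validation import (
    Interval,
    InvalidParameterError,
    validate_params,
)


@validate_params(
    {
        "x": [Interval(Real, None, None, closed="neither")],
        "ratio": [Interval(Real, 0, 1, closed="right")],
    },
    prefer_skip_nested_validation=True,
)
def scale_by_ratio(x, ratio=1.0):
    """Scale ``x`` by a ratio constrained to (0, 1]."""
    return x * ratio


scale_by_ratio(2.0, ratio=0.5)  # passes: 0.5 is in (0, 1]

try:
    scale_by_ratio(2.0, ratio=2.0)  # 2.0 is outside (0, 1]
except InvalidParameterError as exc:
    print(exc)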
+ """ + + def __init__(self, type): + super().__init__() + self.type = type + + def is_satisfied_by(self, val): + return isinstance(val, self.type) + + def __str__(self): + return f"an instance of {_type_name(self.type)!r}" + + +class _NoneConstraint(_Constraint): + """Constraint representing the None singleton.""" + + def is_satisfied_by(self, val): + return val is None + + def __str__(self): + return "None" + + +class _NanConstraint(_Constraint): + """Constraint representing the indicator `np.nan`.""" + + def is_satisfied_by(self, val): + return ( + not isinstance(val, Integral) and isinstance(val, Real) and math.isnan(val) + ) + + def __str__(self): + return "numpy.nan" + + +class _PandasNAConstraint(_Constraint): + """Constraint representing the indicator `pd.NA`.""" + + def is_satisfied_by(self, val): + try: + import pandas as pd + + return isinstance(val, type(pd.NA)) and pd.isna(val) + except ImportError: + return False + + def __str__(self): + return "pandas.NA" + + +class Options(_Constraint): + """Constraint representing a finite set of instances of a given type. + + Parameters + ---------- + type : type + + options : set + The set of valid scalars. + + deprecated : set or None, default=None + A subset of the `options` to mark as deprecated in the string + representation of the constraint. + """ + + def __init__(self, type, options, *, deprecated=None): + super().__init__() + self.type = type + self.options = options + self.deprecated = deprecated or set() + + if self.deprecated - self.options: + raise ValueError("The deprecated options must be a subset of the options.") + + def is_satisfied_by(self, val): + return isinstance(val, self.type) and val in self.options + + def _mark_if_deprecated(self, option): + """Add a deprecated mark to an option if needed.""" + option_str = f"{option!r}" + if option in self.deprecated: + option_str = f"{option_str} (deprecated)" + return option_str + + def __str__(self): + options_str = ( + f"{', '.join([self._mark_if_deprecated(o) for o in self.options])}" + ) + return f"a {_type_name(self.type)} among {{{options_str}}}" + + +class StrOptions(Options): + """Constraint representing a finite set of strings. + + Parameters + ---------- + options : set of str + The set of valid strings. + + deprecated : set of str or None, default=None + A subset of the `options` to mark as deprecated in the string + representation of the constraint. + """ + + def __init__(self, options, *, deprecated=None): + super().__init__(type=str, options=options, deprecated=deprecated) + + +class Interval(_Constraint): + """Constraint representing a typed interval. + + Parameters + ---------- + type : {numbers.Integral, numbers.Real, RealNotInt} + The set of numbers in which to set the interval. + + If RealNotInt, only reals that don't have the integer type + are allowed. For example 1.0 is allowed but 1 is not. + + left : float or int or None + The left bound of the interval. None means left bound is -∞. + + right : float, int or None + The right bound of the interval. None means right bound is +∞. + + closed : {"left", "right", "both", "neither"} + Whether the interval is open or closed. Possible choices are: + + - `"left"`: the interval is closed on the left and open on the right. + It is equivalent to the interval `[ left, right )`. + - `"right"`: the interval is closed on the right and open on the left. + It is equivalent to the interval `( left, right ]`. + - `"both"`: the interval is closed. + It is equivalent to the interval `[ left, right ]`. 
+ - `"neither"`: the interval is open. + It is equivalent to the interval `( left, right )`. + + Notes + ----- + Setting a bound to `None` and setting the interval closed is valid. For instance, + strictly speaking, `Interval(Real, 0, None, closed="both")` corresponds to + `[0, +∞) U {+∞}`. + """ + + def __init__(self, type, left, right, *, closed): + super().__init__() + self.type = type + self.left = left + self.right = right + self.closed = closed + + self._check_params() + + def _check_params(self): + if self.type not in (Integral, Real, RealNotInt): + raise ValueError( + "type must be either numbers.Integral, numbers.Real or RealNotInt." + f" Got {self.type} instead." + ) + + if self.closed not in ("left", "right", "both", "neither"): + raise ValueError( + "closed must be either 'left', 'right', 'both' or 'neither'. " + f"Got {self.closed} instead." + ) + + if self.type is Integral: + suffix = "for an interval over the integers." + if self.left is not None and not isinstance(self.left, Integral): + raise TypeError(f"Expecting left to be an int {suffix}") + if self.right is not None and not isinstance(self.right, Integral): + raise TypeError(f"Expecting right to be an int {suffix}") + if self.left is None and self.closed in ("left", "both"): + raise ValueError( + f"left can't be None when closed == {self.closed} {suffix}" + ) + if self.right is None and self.closed in ("right", "both"): + raise ValueError( + f"right can't be None when closed == {self.closed} {suffix}" + ) + else: + if self.left is not None and not isinstance(self.left, Real): + raise TypeError("Expecting left to be a real number.") + if self.right is not None and not isinstance(self.right, Real): + raise TypeError("Expecting right to be a real number.") + + if self.right is not None and self.left is not None and self.right <= self.left: + raise ValueError( + f"right can't be less than left. 
Got left={self.left} and " + f"right={self.right}" + ) + + def __contains__(self, val): + if not isinstance(val, Integral) and np.isnan(val): + return False + + left_cmp = operator.lt if self.closed in ("left", "both") else operator.le + right_cmp = operator.gt if self.closed in ("right", "both") else operator.ge + + left = -np.inf if self.left is None else self.left + right = np.inf if self.right is None else self.right + + if left_cmp(val, left): + return False + if right_cmp(val, right): + return False + return True + + def is_satisfied_by(self, val): + if not isinstance(val, self.type): + return False + + return val in self + + def __str__(self): + type_str = "an int" if self.type is Integral else "a float" + left_bracket = "[" if self.closed in ("left", "both") else "(" + left_bound = "-inf" if self.left is None else self.left + right_bound = "inf" if self.right is None else self.right + right_bracket = "]" if self.closed in ("right", "both") else ")" + + # better repr if the bounds were given as integers + if not self.type == Integral and isinstance(self.left, Real): + left_bound = float(left_bound) + if not self.type == Integral and isinstance(self.right, Real): + right_bound = float(right_bound) + + return ( + f"{type_str} in the range " + f"{left_bracket}{left_bound}, {right_bound}{right_bracket}" + ) + + +class _ArrayLikes(_Constraint): + """Constraint representing array-likes""" + + def is_satisfied_by(self, val): + return _is_arraylike_not_scalar(val) + + def __str__(self): + return "an array-like" + + +class _SparseMatrices(_Constraint): + """Constraint representing sparse matrices.""" + + def is_satisfied_by(self, val): + return issparse(val) + + def __str__(self): + return "a sparse matrix" + + +class _Callables(_Constraint): + """Constraint representing callables.""" + + def is_satisfied_by(self, val): + return callable(val) + + def __str__(self): + return "a callable" + + +class _RandomStates(_Constraint): + """Constraint representing random states. + + Convenience class for + [Interval(Integral, 0, 2**32 - 1, closed="both"), np.random.RandomState, None] + """ + + def __init__(self): + super().__init__() + self._constraints = [ + Interval(Integral, 0, 2**32 - 1, closed="both"), + _InstancesOf(np.random.RandomState), + _NoneConstraint(), + ] + + def is_satisfied_by(self, val): + return any(c.is_satisfied_by(val) for c in self._constraints) + + def __str__(self): + return ( + f"{', '.join([str(c) for c in self._constraints[:-1]])} or" + f" {self._constraints[-1]}" + ) + + +class _Booleans(_Constraint): + """Constraint representing boolean likes. + + Convenience class for + [bool, np.bool_, Integral (deprecated)] + """ + + def __init__(self): + super().__init__() + self._constraints = [ + _InstancesOf(bool), + _InstancesOf(np.bool_), + ] + + def is_satisfied_by(self, val): + return any(c.is_satisfied_by(val) for c in self._constraints) + + def __str__(self): + return ( + f"{', '.join([str(c) for c in self._constraints[:-1]])} or" + f" {self._constraints[-1]}" + ) + + +class _VerboseHelper(_Constraint): + """Helper constraint for the verbose parameter. 
+ + Convenience class for + [Interval(Integral, 0, None, closed="left"), bool, numpy.bool_] + """ + + def __init__(self): + super().__init__() + self._constraints = [ + Interval(Integral, 0, None, closed="left"), + _InstancesOf(bool), + _InstancesOf(np.bool_), + ] + + def is_satisfied_by(self, val): + return any(c.is_satisfied_by(val) for c in self._constraints) + + def __str__(self): + return ( + f"{', '.join([str(c) for c in self._constraints[:-1]])} or" + f" {self._constraints[-1]}" + ) + + +class MissingValues(_Constraint): + """Helper constraint for the `missing_values` parameters. + + Convenience for + [ + Integral, + Interval(Real, None, None, closed="both"), + str, # when numeric_only is False + None, # when numeric_only is False + _NanConstraint(), + _PandasNAConstraint(), + ] + + Parameters + ---------- + numeric_only : bool, default=False + Whether to consider only numeric missing value markers. + + """ + + def __init__(self, numeric_only=False): + super().__init__() + + self.numeric_only = numeric_only + + self._constraints = [ + _InstancesOf(Integral), + # we use an interval of Real to ignore np.nan that has its own constraint + Interval(Real, None, None, closed="both"), + _NanConstraint(), + _PandasNAConstraint(), + ] + if not self.numeric_only: + self._constraints.extend([_InstancesOf(str), _NoneConstraint()]) + + def is_satisfied_by(self, val): + return any(c.is_satisfied_by(val) for c in self._constraints) + + def __str__(self): + return ( + f"{', '.join([str(c) for c in self._constraints[:-1]])} or" + f" {self._constraints[-1]}" + ) + + +class HasMethods(_Constraint): + """Constraint representing objects that expose specific methods. + + It is useful for parameters following a protocol and where we don't want to impose + an affiliation to a specific module or class. + + Parameters + ---------- + methods : str or list of str + The method(s) that the object is expected to expose. + """ + + @validate_params( + {"methods": [str, list]}, + prefer_skip_nested_validation=True, + ) + def __init__(self, methods): + super().__init__() + if isinstance(methods, str): + methods = [methods] + self.methods = methods + + def is_satisfied_by(self, val): + return all(callable(getattr(val, method, None)) for method in self.methods) + + def __str__(self): + if len(self.methods) == 1: + methods = f"{self.methods[0]!r}" + else: + methods = ( + f"{', '.join([repr(m) for m in self.methods[:-1]])} and" + f" {self.methods[-1]!r}" + ) + return f"an object implementing {methods}" + + +class _IterablesNotString(_Constraint): + """Constraint representing iterables that are not strings.""" + + def is_satisfied_by(self, val): + return isinstance(val, Iterable) and not isinstance(val, str) + + def __str__(self): + return "an iterable" + + +class _CVObjects(_Constraint): + """Constraint representing cv objects. + + Convenient class for + [ + Interval(Integral, 2, None, closed="left"), + HasMethods(["split", "get_n_splits"]), + _IterablesNotString(), + None, + ] + """ + + def __init__(self): + super().__init__() + self._constraints = [ + Interval(Integral, 2, None, closed="left"), + HasMethods(["split", "get_n_splits"]), + _IterablesNotString(), + _NoneConstraint(), + ] + + def is_satisfied_by(self, val): + return any(c.is_satisfied_by(val) for c in self._constraints) + + def __str__(self): + return ( + f"{', '.join([str(c) for c in self._constraints[:-1]])} or" + f" {self._constraints[-1]}" + ) + + +class Hidden: + """Class encapsulating a constraint not meant to be exposed to the user. 
+ + Parameters + ---------- + constraint : str or _Constraint instance + The constraint to be used internally. + """ + + def __init__(self, constraint): + self.constraint = constraint + + +def generate_invalid_param_val(constraint): + """Return a value that does not satisfy the constraint. + + Raises a NotImplementedError if there exists no invalid value for this constraint. + + This is only useful for testing purpose. + + Parameters + ---------- + constraint : _Constraint instance + The constraint to generate a value for. + + Returns + ------- + val : object + A value that does not satisfy the constraint. + """ + if isinstance(constraint, StrOptions): + return f"not {' or '.join(constraint.options)}" + + if isinstance(constraint, MissingValues): + return np.array([1, 2, 3]) + + if isinstance(constraint, _VerboseHelper): + return -1 + + if isinstance(constraint, HasMethods): + return type("HasNotMethods", (), {})() + + if isinstance(constraint, _IterablesNotString): + return "a string" + + if isinstance(constraint, _CVObjects): + return "not a cv object" + + if isinstance(constraint, Interval) and constraint.type is Integral: + if constraint.left is not None: + return constraint.left - 1 + if constraint.right is not None: + return constraint.right + 1 + + # There's no integer outside (-inf, +inf) + raise NotImplementedError + + if isinstance(constraint, Interval) and constraint.type in (Real, RealNotInt): + if constraint.left is not None: + return constraint.left - 1e-6 + if constraint.right is not None: + return constraint.right + 1e-6 + + # bounds are -inf, +inf + if constraint.closed in ("right", "neither"): + return -np.inf + if constraint.closed in ("left", "neither"): + return np.inf + + # interval is [-inf, +inf] + return np.nan + + raise NotImplementedError + + +def generate_valid_param(constraint): + """Return a value that does satisfy a constraint. + + This is only useful for testing purpose. + + Parameters + ---------- + constraint : Constraint instance + The constraint to generate a value for. + + Returns + ------- + val : object + A value that does satisfy the constraint. 
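# Illustrative sketch (not part of the patch): the two test helpers in this
# module produce a value on either side of a constraint, which is handy when
# writing tests for parameter validation. Imports target the private module
# defined in this patch.
from numbers import Integral

from sklearn.utils._param_validation import (
    Interval,
    StrOptions,
    generate_invalid_param_val,
    generate_valid_param,
)

interval = Interval(Integral, 2, None, closed="left")
print(generate_valid_param(interval))        # 3, i.e. left + 1
print(generate_invalid_param_val(interval))  # 1, i.e. left - 1

options = StrOptions({"auto", "exact"})
print(generate_valid_param(options))         # one of the allowed strings
print(generate_invalid_param_val(options))   # a string outside the allowed set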
+ """ + if isinstance(constraint, _ArrayLikes): + return np.array([1, 2, 3]) + + if isinstance(constraint, _SparseMatrices): + return csr_matrix([[0, 1], [1, 0]]) + + if isinstance(constraint, _RandomStates): + return np.random.RandomState(42) + + if isinstance(constraint, _Callables): + return lambda x: x + + if isinstance(constraint, _NoneConstraint): + return None + + if isinstance(constraint, _InstancesOf): + if constraint.type is np.ndarray: + # special case for ndarray since it can't be instantiated without arguments + return np.array([1, 2, 3]) + + if constraint.type in (Integral, Real): + # special case for Integral and Real since they are abstract classes + return 1 + + return constraint.type() + + if isinstance(constraint, _Booleans): + return True + + if isinstance(constraint, _VerboseHelper): + return 1 + + if isinstance(constraint, MissingValues) and constraint.numeric_only: + return np.nan + + if isinstance(constraint, MissingValues) and not constraint.numeric_only: + return "missing" + + if isinstance(constraint, HasMethods): + return type( + "ValidHasMethods", (), {m: lambda self: None for m in constraint.methods} + )() + + if isinstance(constraint, _IterablesNotString): + return [1, 2, 3] + + if isinstance(constraint, _CVObjects): + return 5 + + if isinstance(constraint, Options): # includes StrOptions + for option in constraint.options: + return option + + if isinstance(constraint, Interval): + interval = constraint + if interval.left is None and interval.right is None: + return 0 + elif interval.left is None: + return interval.right - 1 + elif interval.right is None: + return interval.left + 1 + else: + if interval.type is Real: + return (interval.left + interval.right) / 2 + else: + return interval.left + 1 + + raise ValueError(f"Unknown constraint type: {constraint}") diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_plotting.py b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_plotting.py new file mode 100644 index 0000000000000000000000000000000000000000..84eaacc152884f3ba6bba1105b457100887af001 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_plotting.py @@ -0,0 +1,98 @@ +import numpy as np + +from . import check_consistent_length, check_matplotlib_support +from ._response import _get_response_values_binary +from .multiclass import type_of_target +from .validation import _check_pos_label_consistency + + +class _BinaryClassifierCurveDisplayMixin: + """Mixin class to be used in Displays requiring a binary classifier. + + The aim of this class is to centralize some validations regarding the estimator and + the target and gather the response of the estimator. 
+ """ + + def _validate_plot_params(self, *, ax=None, name=None): + check_matplotlib_support(f"{self.__class__.__name__}.plot") + import matplotlib.pyplot as plt + + if ax is None: + _, ax = plt.subplots() + + name = self.estimator_name if name is None else name + return ax, ax.figure, name + + @classmethod + def _validate_and_get_response_values( + cls, estimator, X, y, *, response_method="auto", pos_label=None, name=None + ): + check_matplotlib_support(f"{cls.__name__}.from_estimator") + + name = estimator.__class__.__name__ if name is None else name + + y_pred, pos_label = _get_response_values_binary( + estimator, + X, + response_method=response_method, + pos_label=pos_label, + ) + + return y_pred, pos_label, name + + @classmethod + def _validate_from_predictions_params( + cls, y_true, y_pred, *, sample_weight=None, pos_label=None, name=None + ): + check_matplotlib_support(f"{cls.__name__}.from_predictions") + + if type_of_target(y_true) != "binary": + raise ValueError( + f"The target y is not binary. Got {type_of_target(y_true)} type of" + " target." + ) + + check_consistent_length(y_true, y_pred, sample_weight) + pos_label = _check_pos_label_consistency(pos_label, y_true) + + name = name if name is not None else "Classifier" + + return pos_label, name + + +def _validate_score_name(score_name, scoring, negate_score): + """Validate the `score_name` parameter. + + If `score_name` is provided, we just return it as-is. + If `score_name` is `None`, we use `Score` if `negate_score` is `False` and + `Negative score` otherwise. + If `score_name` is a string or a callable, we infer the name. We replace `_` by + spaces and capitalize the first letter. We remove `neg_` and replace it by + `"Negative"` if `negate_score` is `False` or just remove it otherwise. + """ + if score_name is not None: + return score_name + elif scoring is None: + return "Negative score" if negate_score else "Score" + else: + score_name = scoring.__name__ if callable(scoring) else scoring + if negate_score: + if score_name.startswith("neg_"): + score_name = score_name[4:] + else: + score_name = f"Negative {score_name}" + elif score_name.startswith("neg_"): + score_name = f"Negative {score_name[4:]}" + score_name = score_name.replace("_", " ") + return score_name.capitalize() + + +def _interval_max_min_ratio(data): + """Compute the ratio between the largest and smallest inter-point distances. + + A value larger than 5 typically indicates that the parameter range would + better be displayed with a log scale while a linear scale would be more + suitable otherwise. + """ + diff = np.diff(np.sort(data)) + return diff.max() / diff.min() diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_random.pxd b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_random.pxd new file mode 100644 index 0000000000000000000000000000000000000000..0ebcc1de0cce69cde50615c2e9f7a8d51f2559da --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_random.pxd @@ -0,0 +1,36 @@ +# Authors: Arnaud Joly +# +# License: BSD 3 clause + + +cimport numpy as cnp +ctypedef cnp.npy_uint32 UINT32_t + +cdef inline UINT32_t DEFAULT_SEED = 1 + +cdef enum: + # Max value for our rand_r replacement (near the bottom). + # We don't use RAND_MAX because it's different across platforms and + # particularly tiny on Windows/MSVC. + # It corresponds to the maximum representable value for + # 32-bit signed integers (i.e. 2^31 - 1). 
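# Illustrative sketch (not part of the patch): expected behaviour of the two
# plotting helpers defined above, assuming the private module path stays as in
# this patch.
from sklearn.utils._plotting import _interval_max_min_ratio, _validate_score_name

print(_validate_score_name(None, "neg_mean_squared_error", negate_score=False))
# -> "Negative mean squared error"
print(_validate_score_name(None, "neg_mean_squared_error", negate_score=True))
# -> "Mean squared error"
print(_validate_score_name(None, None, negate_score=False))
# -> "Score"

# A ratio above ~5 suggests displaying the parameter range on a log scale.
print(_interval_max_min_ratio([1, 2, 4, 8, 16]))  # 8.0 -> log scale
print(_interval_max_min_ratio([1, 2, 3, 4, 5]))   # 1.0 -> linear scale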
+ RAND_R_MAX = 2147483647 + + +# rand_r replacement using a 32bit XorShift generator +# See http://www.jstatsoft.org/v08/i14/paper for details +cdef inline UINT32_t our_rand_r(UINT32_t* seed) nogil: + """Generate a pseudo-random np.uint32 from a np.uint32 seed""" + # seed shouldn't ever be 0. + if (seed[0] == 0): + seed[0] = DEFAULT_SEED + + seed[0] ^= (seed[0] << 13) + seed[0] ^= (seed[0] >> 17) + seed[0] ^= (seed[0] << 5) + + # Use the modulo to make sure that we don't return a values greater than the + # maximum representable value for signed 32bit integers (i.e. 2^31 - 1). + # Note that the parenthesis are needed to avoid overflow: here + # RAND_R_MAX is cast to UINT32_t before 1 is added. + return seed[0] % ((RAND_R_MAX) + 1) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_seq_dataset.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_seq_dataset.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..04fc2fd38886317ad58921b28a8359824613acec Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_seq_dataset.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_seq_dataset.pxd b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_seq_dataset.pxd new file mode 100644 index 0000000000000000000000000000000000000000..2bd630fa4db6791823a3bb7769a4aefc438ed09e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_seq_dataset.pxd @@ -0,0 +1,104 @@ +# WARNING: Do not edit this file directly. +# It is automatically generated from 'sklearn/utils/_seq_dataset.pxd.tp'. +# Changes must be made there. + +"""Dataset abstractions for sequential data access.""" + +cimport numpy as cnp + +# SequentialDataset and its two concrete subclasses are (optionally randomized) +# iterators over the rows of a matrix X and corresponding target values y. 
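# Illustrative sketch (not part of the patch): a pure-Python rendering of the
# XorShift ``our_rand_r`` routine from ``_random.pxd`` above, to make the bit
# manipulation easier to follow. Python integers are unbounded, so the 32-bit
# wrap-around is emulated with an explicit mask, and the state is returned
# instead of being updated through a pointer.
RAND_R_MAX = 2147483647  # 2**31 - 1, as in the enum above
DEFAULT_SEED = 1


def our_rand_r_py(seed):
    """Return ``(value, new_seed)`` for one step of the 32-bit XorShift."""
    if seed == 0:
        seed = DEFAULT_SEED
    seed ^= (seed << 13) & 0xFFFFFFFF
    seed ^= seed >> 17
    seed ^= (seed << 5) & 0xFFFFFFFF
    # Fold into [0, RAND_R_MAX], mirroring ``seed[0] % (RAND_R_MAX + 1)``.
    return seed % (RAND_R_MAX + 1), seed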
+ +#------------------------------------------------------------------------------ + +cdef class SequentialDataset64: + cdef int current_index + cdef int[::1] index + cdef int *index_data_ptr + cdef Py_ssize_t n_samples + cdef cnp.uint32_t seed + + cdef void shuffle(self, cnp.uint32_t seed) noexcept nogil + cdef int _get_next_index(self) noexcept nogil + cdef int _get_random_index(self) noexcept nogil + + cdef void _sample(self, double **x_data_ptr, int **x_ind_ptr, + int *nnz, double *y, double *sample_weight, + int current_index) noexcept nogil + cdef void next(self, double **x_data_ptr, int **x_ind_ptr, + int *nnz, double *y, double *sample_weight) noexcept nogil + cdef int random(self, double **x_data_ptr, int **x_ind_ptr, + int *nnz, double *y, double *sample_weight) noexcept nogil + + +cdef class ArrayDataset64(SequentialDataset64): + cdef const double[:, ::1] X + cdef const double[::1] Y + cdef const double[::1] sample_weights + cdef Py_ssize_t n_features + cdef cnp.npy_intp X_stride + cdef double *X_data_ptr + cdef double *Y_data_ptr + cdef const int[::1] feature_indices + cdef int *feature_indices_ptr + cdef double *sample_weight_data + + +cdef class CSRDataset64(SequentialDataset64): + cdef const double[::1] X_data + cdef const int[::1] X_indptr + cdef const int[::1] X_indices + cdef const double[::1] Y + cdef const double[::1] sample_weights + cdef double *X_data_ptr + cdef int *X_indptr_ptr + cdef int *X_indices_ptr + cdef double *Y_data_ptr + cdef double *sample_weight_data + +#------------------------------------------------------------------------------ + +cdef class SequentialDataset32: + cdef int current_index + cdef int[::1] index + cdef int *index_data_ptr + cdef Py_ssize_t n_samples + cdef cnp.uint32_t seed + + cdef void shuffle(self, cnp.uint32_t seed) noexcept nogil + cdef int _get_next_index(self) noexcept nogil + cdef int _get_random_index(self) noexcept nogil + + cdef void _sample(self, float **x_data_ptr, int **x_ind_ptr, + int *nnz, float *y, float *sample_weight, + int current_index) noexcept nogil + cdef void next(self, float **x_data_ptr, int **x_ind_ptr, + int *nnz, float *y, float *sample_weight) noexcept nogil + cdef int random(self, float **x_data_ptr, int **x_ind_ptr, + int *nnz, float *y, float *sample_weight) noexcept nogil + + +cdef class ArrayDataset32(SequentialDataset32): + cdef const float[:, ::1] X + cdef const float[::1] Y + cdef const float[::1] sample_weights + cdef Py_ssize_t n_features + cdef cnp.npy_intp X_stride + cdef float *X_data_ptr + cdef float *Y_data_ptr + cdef const int[::1] feature_indices + cdef int *feature_indices_ptr + cdef float *sample_weight_data + + +cdef class CSRDataset32(SequentialDataset32): + cdef const float[::1] X_data + cdef const int[::1] X_indptr + cdef const int[::1] X_indices + cdef const float[::1] Y + cdef const float[::1] sample_weights + cdef float *X_data_ptr + cdef int *X_indptr_ptr + cdef int *X_indices_ptr + cdef float *Y_data_ptr + cdef float *sample_weight_data diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_set_output.py b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_set_output.py new file mode 100644 index 0000000000000000000000000000000000000000..cf7364e11732056c1cb1987d9da2633ad80870e7 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_set_output.py @@ -0,0 +1,441 @@ +import importlib +from functools import wraps +from typing import Protocol, runtime_checkable + +import numpy as np +from scipy.sparse import issparse + +from .._config import 
get_config +from ._available_if import available_if + + +def check_library_installed(library): + """Check library is installed.""" + try: + return importlib.import_module(library) + except ImportError as exc: + raise ImportError( + f"Setting output container to '{library}' requires {library} to be" + " installed" + ) from exc + + +def get_columns(columns): + if callable(columns): + try: + return columns() + except Exception: + return None + return columns + + +@runtime_checkable +class ContainerAdapterProtocol(Protocol): + container_lib: str + + def create_container(self, X_output, X_original, columns, inplace=False): + """Create container from `X_output` with additional metadata. + + Parameters + ---------- + X_output : {ndarray, dataframe} + Data to wrap. + + X_original : {ndarray, dataframe} + Original input dataframe. This is used to extract the metadata that should + be passed to `X_output`, e.g. pandas row index. + + columns : callable, ndarray, or None + The column names or a callable that returns the column names. The + callable is useful if the column names require some computation. If `None`, + then no columns are passed to the container's constructor. + + inplace : bool, default=False + Whether or not we intend to modify `X_output` in-place. However, it does + not guarantee that we return the same object if the in-place operation + is not possible. + + Returns + ------- + wrapped_output : container_type + `X_output` wrapped into the container type. + """ + + def is_supported_container(self, X): + """Return True if X is a supported container. + + Parameters + ---------- + Xs: container + Containers to be checked. + + Returns + ------- + is_supported_container : bool + True if X is a supported container. + """ + + def rename_columns(self, X, columns): + """Rename columns in `X`. + + Parameters + ---------- + X : container + Container which columns is updated. + + columns : ndarray of str + Columns to update the `X`'s columns with. + + Returns + ------- + updated_container : container + Container with new names. + """ + + def hstack(self, Xs): + """Stack containers horizontally (column-wise). + + Parameters + ---------- + Xs : list of containers + List of containers to stack. + + Returns + ------- + stacked_Xs : container + Stacked containers. + """ + + +class PandasAdapter: + container_lib = "pandas" + + def create_container(self, X_output, X_original, columns, inplace=True): + pd = check_library_installed("pandas") + columns = get_columns(columns) + + if not inplace or not isinstance(X_output, pd.DataFrame): + # In all these cases, we need to create a new DataFrame + + # Unfortunately, we cannot use `getattr(container, "index")` + # because `list` exposes an `index` attribute. + if isinstance(X_output, pd.DataFrame): + index = X_output.index + elif isinstance(X_original, pd.DataFrame): + index = X_original.index + else: + index = None + + # We don't pass columns here because it would intend columns selection + # instead of renaming. 
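+ # For instance (illustrative note): pd.DataFrame(df, columns=["a"]) would
+ # select/reindex to column "a" only, whereas assigning new labels via
+ # `X.columns = columns` in rename_columns below merely relabels the
+ # existing columns.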
+ X_output = pd.DataFrame(X_output, index=index, copy=not inplace) + + if columns is not None: + return self.rename_columns(X_output, columns) + return X_output + + def is_supported_container(self, X): + pd = check_library_installed("pandas") + return isinstance(X, pd.DataFrame) + + def rename_columns(self, X, columns): + # we cannot use `rename` since it takes a dictionary and at this stage we have + # potentially duplicate column names in `X` + X.columns = columns + return X + + def hstack(self, Xs): + pd = check_library_installed("pandas") + return pd.concat(Xs, axis=1) + + +class PolarsAdapter: + container_lib = "polars" + + def create_container(self, X_output, X_original, columns, inplace=True): + pl = check_library_installed("polars") + columns = get_columns(columns) + columns = columns.tolist() if isinstance(columns, np.ndarray) else columns + + if not inplace or not isinstance(X_output, pl.DataFrame): + # In all these cases, we need to create a new DataFrame + return pl.DataFrame(X_output, schema=columns, orient="row") + + if columns is not None: + return self.rename_columns(X_output, columns) + return X_output + + def is_supported_container(self, X): + pl = check_library_installed("polars") + return isinstance(X, pl.DataFrame) + + def rename_columns(self, X, columns): + # we cannot use `rename` since it takes a dictionary and at this stage we have + # potentially duplicate column names in `X` + X.columns = columns + return X + + def hstack(self, Xs): + pl = check_library_installed("polars") + return pl.concat(Xs, how="horizontal") + + +class ContainerAdaptersManager: + def __init__(self): + self.adapters = {} + + @property + def supported_outputs(self): + return {"default"} | set(self.adapters) + + def register(self, adapter): + self.adapters[adapter.container_lib] = adapter + + +ADAPTERS_MANAGER = ContainerAdaptersManager() +ADAPTERS_MANAGER.register(PandasAdapter()) +ADAPTERS_MANAGER.register(PolarsAdapter()) + + +def _get_container_adapter(method, estimator=None): + """Get container adapter.""" + dense_config = _get_output_config(method, estimator)["dense"] + try: + return ADAPTERS_MANAGER.adapters[dense_config] + except KeyError: + return None + + +def _get_output_config(method, estimator=None): + """Get output config based on estimator and global configuration. + + Parameters + ---------- + method : {"transform"} + Estimator's method for which the output container is looked up. + + estimator : estimator instance or None + Estimator to get the output configuration from. If `None`, check global + configuration is used. + + Returns + ------- + config : dict + Dictionary with keys: + + - "dense": specifies the dense container for `method`. This can be + `"default"` or `"pandas"`. + """ + est_sklearn_output_config = getattr(estimator, "_sklearn_output_config", {}) + if method in est_sklearn_output_config: + dense_config = est_sklearn_output_config[method] + else: + dense_config = get_config()[f"{method}_output"] + + supported_outputs = ADAPTERS_MANAGER.supported_outputs + if dense_config not in supported_outputs: + raise ValueError( + f"output config must be in {sorted(supported_outputs)}, got {dense_config}" + ) + + return {"dense": dense_config} + + +def _wrap_data_with_container(method, data_to_wrap, original_input, estimator): + """Wrap output with container based on an estimator's or global config. + + Parameters + ---------- + method : {"transform"} + Estimator's method to get container output for. + + data_to_wrap : {ndarray, dataframe} + Data to wrap with container. 
+ + original_input : {ndarray, dataframe} + Original input of function. + + estimator : estimator instance + Estimator with to get the output configuration from. + + Returns + ------- + output : {ndarray, dataframe} + If the output config is "default" or the estimator is not configured + for wrapping return `data_to_wrap` unchanged. + If the output config is "pandas", return `data_to_wrap` as a pandas + DataFrame. + """ + output_config = _get_output_config(method, estimator) + + if output_config["dense"] == "default" or not _auto_wrap_is_configured(estimator): + return data_to_wrap + + dense_config = output_config["dense"] + if issparse(data_to_wrap): + raise ValueError( + "The transformer outputs a scipy sparse matrix. " + "Try to set the transformer output to a dense array or disable " + f"{dense_config.capitalize()} output with set_output(transform='default')." + ) + + adapter = ADAPTERS_MANAGER.adapters[dense_config] + return adapter.create_container( + data_to_wrap, + original_input, + columns=estimator.get_feature_names_out, + ) + + +def _wrap_method_output(f, method): + """Wrapper used by `_SetOutputMixin` to automatically wrap methods.""" + + @wraps(f) + def wrapped(self, X, *args, **kwargs): + data_to_wrap = f(self, X, *args, **kwargs) + if isinstance(data_to_wrap, tuple): + # only wrap the first output for cross decomposition + return_tuple = ( + _wrap_data_with_container(method, data_to_wrap[0], X, self), + *data_to_wrap[1:], + ) + # Support for namedtuples `_make` is a documented API for namedtuples: + # https://docs.python.org/3/library/collections.html#collections.somenamedtuple._make + if hasattr(type(data_to_wrap), "_make"): + return type(data_to_wrap)._make(return_tuple) + return return_tuple + + return _wrap_data_with_container(method, data_to_wrap, X, self) + + return wrapped + + +def _auto_wrap_is_configured(estimator): + """Return True if estimator is configured for auto-wrapping the transform method. + + `_SetOutputMixin` sets `_sklearn_auto_wrap_output_keys` to `set()` if auto wrapping + is manually disabled. + """ + auto_wrap_output_keys = getattr(estimator, "_sklearn_auto_wrap_output_keys", set()) + return ( + hasattr(estimator, "get_feature_names_out") + and "transform" in auto_wrap_output_keys + ) + + +class _SetOutputMixin: + """Mixin that dynamically wraps methods to return container based on config. + + Currently `_SetOutputMixin` wraps `transform` and `fit_transform` and configures + it based on `set_output` of the global configuration. + + `set_output` is only defined if `get_feature_names_out` is defined and + `auto_wrap_output_keys` is the default value. + """ + + def __init_subclass__(cls, auto_wrap_output_keys=("transform",), **kwargs): + super().__init_subclass__(**kwargs) + + # Dynamically wraps `transform` and `fit_transform` and configure it's + # output based on `set_output`. 
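+ # For example (illustrative only), a subclass declared as
+ #     class MyTransformer(_SetOutputMixin, auto_wrap_output_keys=None): ...
+ # opts out of the wrapping entirely, while the default
+ # auto_wrap_output_keys=("transform",) wraps transform/fit_transform.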
+ if not ( + isinstance(auto_wrap_output_keys, tuple) or auto_wrap_output_keys is None + ): + raise ValueError("auto_wrap_output_keys must be None or a tuple of keys.") + + if auto_wrap_output_keys is None: + cls._sklearn_auto_wrap_output_keys = set() + return + + # Mapping from method to key in configurations + method_to_key = { + "transform": "transform", + "fit_transform": "transform", + } + cls._sklearn_auto_wrap_output_keys = set() + + for method, key in method_to_key.items(): + if not hasattr(cls, method) or key not in auto_wrap_output_keys: + continue + cls._sklearn_auto_wrap_output_keys.add(key) + + # Only wrap methods defined by cls itself + if method not in cls.__dict__: + continue + wrapped_method = _wrap_method_output(getattr(cls, method), key) + setattr(cls, method, wrapped_method) + + @available_if(_auto_wrap_is_configured) + def set_output(self, *, transform=None): + """Set output container. + + See :ref:`sphx_glr_auto_examples_miscellaneous_plot_set_output.py` + for an example on how to use the API. + + Parameters + ---------- + transform : {"default", "pandas"}, default=None + Configure output of `transform` and `fit_transform`. + + - `"default"`: Default output format of a transformer + - `"pandas"`: DataFrame output + - `"polars"`: Polars output + - `None`: Transform configuration is unchanged + + .. versionadded:: 1.4 + `"polars"` option was added. + + Returns + ------- + self : estimator instance + Estimator instance. + """ + if transform is None: + return self + + if not hasattr(self, "_sklearn_output_config"): + self._sklearn_output_config = {} + + self._sklearn_output_config["transform"] = transform + return self + + +def _safe_set_output(estimator, *, transform=None): + """Safely call estimator.set_output and error if it not available. + + This is used by meta-estimators to set the output for child estimators. + + Parameters + ---------- + estimator : estimator instance + Estimator instance. + + transform : {"default", "pandas"}, default=None + Configure output of the following estimator's methods: + + - `"transform"` + - `"fit_transform"` + + If `None`, this operation is a no-op. + + Returns + ------- + estimator : estimator instance + Estimator instance. + """ + set_output_for_transform = ( + hasattr(estimator, "transform") + or hasattr(estimator, "fit_transform") + and transform is not None + ) + if not set_output_for_transform: + # If estimator can not transform, then `set_output` does not need to be + # called. + return + + if not hasattr(estimator, "set_output"): + raise ValueError( + f"Unable to configure output for {estimator} because `set_output` " + "is not available." 
+ ) + return estimator.set_output(transform=transform) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_sorting.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_sorting.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..76592d0a20e9d4d8782e98c7b0ea7301429df1a7 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_sorting.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_sorting.pxd b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_sorting.pxd new file mode 100644 index 0000000000000000000000000000000000000000..51f21afd4d3e401bfe4021ff308cd2d5c36e5c4b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_sorting.pxd @@ -0,0 +1,9 @@ +from ._typedefs cimport intp_t + +from cython cimport floating + +cdef int simultaneous_sort( + floating *dist, + intp_t *idx, + intp_t size, +) noexcept nogil diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_tags.py b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_tags.py new file mode 100644 index 0000000000000000000000000000000000000000..c8f6ffb651a0de206ae078c8628b0bf03c82392a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_tags.py @@ -0,0 +1,68 @@ +import numpy as np + +_DEFAULT_TAGS = { + "array_api_support": False, + "non_deterministic": False, + "requires_positive_X": False, + "requires_positive_y": False, + "X_types": ["2darray"], + "poor_score": False, + "no_validation": False, + "multioutput": False, + "allow_nan": False, + "stateless": False, + "multilabel": False, + "_skip_test": False, + "_xfail_checks": False, + "multioutput_only": False, + "binary_only": False, + "requires_fit": True, + "preserves_dtype": [np.float64], + "requires_y": False, + "pairwise": False, +} + + +def _safe_tags(estimator, key=None): + """Safely get estimator tags. + + :class:`~sklearn.BaseEstimator` provides the estimator tags machinery. + However, if an estimator does not inherit from this base class, we should + fall-back to the default tags. + + For scikit-learn built-in estimators, we should still rely on + `self._get_tags()`. `_safe_tags(est)` should be used when we are not sure + where `est` comes from: typically `_safe_tags(self.base_estimator)` where + `self` is a meta-estimator, or in the common checks. + + Parameters + ---------- + estimator : estimator object + The estimator from which to get the tag. + + key : str, default=None + Tag name to get. By default (`None`), all tags are returned. + + Returns + ------- + tags : dict or tag value + The estimator tags. A single value is returned if `key` is not None. + """ + if hasattr(estimator, "_get_tags"): + tags_provider = "_get_tags()" + tags = estimator._get_tags() + elif hasattr(estimator, "_more_tags"): + tags_provider = "_more_tags()" + tags = {**_DEFAULT_TAGS, **estimator._more_tags()} + else: + tags_provider = "_DEFAULT_TAGS" + tags = _DEFAULT_TAGS + + if key is not None: + if key not in tags: + raise ValueError( + f"The key {key} is not defined in {tags_provider} for the " + f"class {estimator.__class__.__name__}." 
+ ) + return tags[key] + return tags diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_typedefs.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_typedefs.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..cd65d20e367fd5e1dc0b2d0aba1cb07cc0d34383 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_typedefs.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_typedefs.pxd b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_typedefs.pxd new file mode 100644 index 0000000000000000000000000000000000000000..3ffe5b3b41098f75be516ff96b84d9d0ea621824 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_typedefs.pxd @@ -0,0 +1,29 @@ +# Commonly used types +# These are redefinitions of the ones defined by numpy in +# https://github.com/numpy/numpy/blob/main/numpy/__init__.pxd +# and exposed by cython in +# https://github.com/cython/cython/blob/master/Cython/Includes/numpy/__init__.pxd. +# It will eventually avoid having to always include the numpy headers even when we +# would only use it for the types. +# +# When used to declare variables that will receive values from numpy arrays, it +# should match the dtype of the array. For example, to declare a variable that will +# receive values from a numpy array of dtype np.float64, the type float64_t must be +# used. +# +# TODO: Stop defining custom types locally or globally like DTYPE_t and friends and +# use these consistently throughout the codebase. +# NOTE: Extend this list as needed when converting more cython extensions. +ctypedef unsigned char uint8_t +ctypedef unsigned int uint32_t +ctypedef unsigned long long uint64_t +ctypedef Py_ssize_t intp_t +ctypedef float float32_t +ctypedef double float64_t +# Sparse matrices indices and indices' pointers arrays must use int32_t over +# intp_t because intp_t is platform dependent. +# When large sparse matrices are supported, indexing must use int64_t. +# See https://github.com/scikit-learn/scikit-learn/issues/23653 which tracks the +# ongoing work to support large sparse matrices. 
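+# For example (illustrative note), scipy.sparse.csr_matrix typically stores its
+# `indices`/`indptr` arrays as np.int32 when the matrix is small enough, which
+# maps to the int32_t typedef below; larger matrices would need int64_t.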
+ctypedef signed int int32_t +ctypedef signed long long int64_t diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_vector_sentinel.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_vector_sentinel.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..45922b85d73f0625701a20fd5986d3fd890333c7 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_vector_sentinel.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_vector_sentinel.pxd b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_vector_sentinel.pxd new file mode 100644 index 0000000000000000000000000000000000000000..64de6c18830b5e24c77bfed38cfffccc3b62955a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_vector_sentinel.pxd @@ -0,0 +1,12 @@ +cimport numpy as cnp + +from libcpp.vector cimport vector +from ..utils._typedefs cimport intp_t, float64_t, int32_t, int64_t + +ctypedef fused vector_typed: + vector[float64_t] + vector[intp_t] + vector[int32_t] + vector[int64_t] + +cdef cnp.ndarray vector_to_nd_array(vector_typed * vect_ptr) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_weight_vector.pxd b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_weight_vector.pxd new file mode 100644 index 0000000000000000000000000000000000000000..a8ef0ab53d04848fbfe4b68f493146badb09d734 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/_weight_vector.pxd @@ -0,0 +1,48 @@ +# WARNING: Do not edit this file directly. +# It is automatically generated from 'sklearn/utils/_weight_vector.pxd.tp'. +# Changes must be made there. + + +cdef class WeightVector64(object): + cdef readonly double[::1] w + cdef readonly double[::1] aw + cdef double *w_data_ptr + cdef double *aw_data_ptr + + cdef double wscale + cdef double average_a + cdef double average_b + cdef int n_features + cdef double sq_norm + + cdef void add(self, double *x_data_ptr, int *x_ind_ptr, + int xnnz, double c) noexcept nogil + cdef void add_average(self, double *x_data_ptr, int *x_ind_ptr, + int xnnz, double c, double num_iter) noexcept nogil + cdef double dot(self, double *x_data_ptr, int *x_ind_ptr, + int xnnz) noexcept nogil + cdef void scale(self, double c) noexcept nogil + cdef void reset_wscale(self) noexcept nogil + cdef double norm(self) noexcept nogil + +cdef class WeightVector32(object): + cdef readonly float[::1] w + cdef readonly float[::1] aw + cdef float *w_data_ptr + cdef float *aw_data_ptr + + cdef double wscale + cdef double average_a + cdef double average_b + cdef int n_features + cdef double sq_norm + + cdef void add(self, float *x_data_ptr, int *x_ind_ptr, + int xnnz, float c) noexcept nogil + cdef void add_average(self, float *x_data_ptr, int *x_ind_ptr, + int xnnz, float c, float num_iter) noexcept nogil + cdef float dot(self, float *x_data_ptr, int *x_ind_ptr, + int xnnz) noexcept nogil + cdef void scale(self, float c) noexcept nogil + cdef void reset_wscale(self) noexcept nogil + cdef float norm(self) noexcept nogil diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/arrayfuncs.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/arrayfuncs.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..69d04db31d222b55a767ae976e7302a2b773274b Binary files /dev/null and 
b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/arrayfuncs.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/class_weight.py b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/class_weight.py new file mode 100644 index 0000000000000000000000000000000000000000..55802f780ed4194c66cbd8d3a98c8edb669720b2 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/class_weight.py @@ -0,0 +1,224 @@ +""" +The :mod:`sklearn.utils.class_weight` module includes utilities for handling +weights based on class labels. +""" + +# Authors: Andreas Mueller +# Manoj Kumar +# License: BSD 3 clause + +import numpy as np +from scipy import sparse + +from ._param_validation import StrOptions, validate_params + + +@validate_params( + { + "class_weight": [dict, StrOptions({"balanced"}), None], + "classes": [np.ndarray], + "y": ["array-like"], + }, + prefer_skip_nested_validation=True, +) +def compute_class_weight(class_weight, *, classes, y): + """Estimate class weights for unbalanced datasets. + + Parameters + ---------- + class_weight : dict, "balanced" or None + If "balanced", class weights will be given by + `n_samples / (n_classes * np.bincount(y))`. + If a dictionary is given, keys are classes and values are corresponding class + weights. + If `None` is given, the class weights will be uniform. + + classes : ndarray + Array of the classes occurring in the data, as given by + `np.unique(y_org)` with `y_org` the original class labels. + + y : array-like of shape (n_samples,) + Array of original class labels per sample. + + Returns + ------- + class_weight_vect : ndarray of shape (n_classes,) + Array with `class_weight_vect[i]` the weight for i-th class. + + References + ---------- + The "balanced" heuristic is inspired by + Logistic Regression in Rare Events Data, King, Zen, 2001. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.utils.class_weight import compute_class_weight + >>> y = [1, 1, 1, 1, 0, 0] + >>> compute_class_weight(class_weight="balanced", classes=np.unique(y), y=y) + array([1.5 , 0.75]) + """ + # Import error caused by circular imports. + from ..preprocessing import LabelEncoder + + if set(y) - set(classes): + raise ValueError("classes should include all valid labels that can be in y") + if class_weight is None or len(class_weight) == 0: + # uniform class weights + weight = np.ones(classes.shape[0], dtype=np.float64, order="C") + elif class_weight == "balanced": + # Find the weight of each class as present in y. 
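+ # Worked example (matching the docstring above): for y = [1, 1, 1, 1, 0, 0],
+ # len(y) = 6, len(le.classes_) = 2 and np.bincount(y_ind) = [2, 4], so
+ # recip_freq = 6 / (2 * [2, 4]) = [1.5, 0.75].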
+ le = LabelEncoder() + y_ind = le.fit_transform(y) + if not all(np.isin(classes, le.classes_)): + raise ValueError("classes should have valid labels that are in y") + + recip_freq = len(y) / (len(le.classes_) * np.bincount(y_ind).astype(np.float64)) + weight = recip_freq[le.transform(classes)] + else: + # user-defined dictionary + weight = np.ones(classes.shape[0], dtype=np.float64, order="C") + unweighted_classes = [] + for i, c in enumerate(classes): + if c in class_weight: + weight[i] = class_weight[c] + else: + unweighted_classes.append(c) + + n_weighted_classes = len(classes) - len(unweighted_classes) + if unweighted_classes and n_weighted_classes != len(class_weight): + unweighted_classes_user_friendly_str = np.array(unweighted_classes).tolist() + raise ValueError( + f"The classes, {unweighted_classes_user_friendly_str}, are not in" + " class_weight" + ) + + return weight + + +@validate_params( + { + "class_weight": [dict, list, StrOptions({"balanced"}), None], + "y": ["array-like", "sparse matrix"], + "indices": ["array-like", None], + }, + prefer_skip_nested_validation=True, +) +def compute_sample_weight(class_weight, y, *, indices=None): + """Estimate sample weights by class for unbalanced datasets. + + Parameters + ---------- + class_weight : dict, list of dicts, "balanced", or None + Weights associated with classes in the form `{class_label: weight}`. + If not given, all classes are supposed to have weight one. For + multi-output problems, a list of dicts can be provided in the same + order as the columns of y. + + Note that for multioutput (including multilabel) weights should be + defined for each class of every column in its own dict. For example, + for four-class multilabel classification weights should be + `[{0: 1, 1: 1}, {0: 1, 1: 5}, {0: 1, 1: 1}, {0: 1, 1: 1}]` instead of + `[{1:1}, {2:5}, {3:1}, {4:1}]`. + + The `"balanced"` mode uses the values of y to automatically adjust + weights inversely proportional to class frequencies in the input data: + `n_samples / (n_classes * np.bincount(y))`. + + For multi-output, the weights of each column of y will be multiplied. + + y : {array-like, sparse matrix} of shape (n_samples,) or (n_samples, n_outputs) + Array of original class labels per sample. + + indices : array-like of shape (n_subsample,), default=None + Array of indices to be used in a subsample. Can be of length less than + `n_samples` in the case of a subsample, or equal to `n_samples` in the + case of a bootstrap subsample with repeated indices. If `None`, the + sample weight will be calculated over the full sample. Only `"balanced"` + is supported for `class_weight` if this is provided. + + Returns + ------- + sample_weight_vect : ndarray of shape (n_samples,) + Array with sample weights as applied to the original `y`. + + Examples + -------- + >>> from sklearn.utils.class_weight import compute_sample_weight + >>> y = [1, 1, 1, 1, 0, 0] + >>> compute_sample_weight(class_weight="balanced", y=y) + array([0.75, 0.75, 0.75, 0.75, 1.5 , 1.5 ]) + """ + + # Ensure y is 2D. Sparse matrices are already 2D. + if not sparse.issparse(y): + y = np.atleast_1d(y) + if y.ndim == 1: + y = np.reshape(y, (-1, 1)) + n_outputs = y.shape[1] + + if indices is not None and class_weight != "balanced": + raise ValueError( + "The only valid class_weight for subsampling is 'balanced'. " + f"Given {class_weight}." 
+ ) + elif n_outputs > 1: + if class_weight is None or isinstance(class_weight, dict): + raise ValueError( + "For multi-output, class_weight should be a list of dicts, or the " + "string 'balanced'." + ) + elif isinstance(class_weight, list) and len(class_weight) != n_outputs: + raise ValueError( + "For multi-output, number of elements in class_weight should match " + f"number of outputs. Got {len(class_weight)} element(s) while having " + f"{n_outputs} outputs." + ) + + expanded_class_weight = [] + for k in range(n_outputs): + if sparse.issparse(y): + # Ok to densify a single column at a time + y_full = y[:, [k]].toarray().flatten() + else: + y_full = y[:, k] + classes_full = np.unique(y_full) + classes_missing = None + + if class_weight == "balanced" or n_outputs == 1: + class_weight_k = class_weight + else: + class_weight_k = class_weight[k] + + if indices is not None: + # Get class weights for the subsample, covering all classes in + # case some labels that were present in the original data are + # missing from the sample. + y_subsample = y_full[indices] + classes_subsample = np.unique(y_subsample) + + weight_k = np.take( + compute_class_weight( + class_weight_k, classes=classes_subsample, y=y_subsample + ), + np.searchsorted(classes_subsample, classes_full), + mode="clip", + ) + + classes_missing = set(classes_full) - set(classes_subsample) + else: + weight_k = compute_class_weight( + class_weight_k, classes=classes_full, y=y_full + ) + + weight_k = weight_k[np.searchsorted(classes_full, y_full)] + + if classes_missing: + # Make missing classes' weight zero + weight_k[np.isin(y_full, list(classes_missing))] = 0.0 + + expanded_class_weight.append(weight_k) + + expanded_class_weight = np.prod(expanded_class_weight, axis=0, dtype=np.float64) + + return expanded_class_weight diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/estimator_checks.py b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/estimator_checks.py new file mode 100644 index 0000000000000000000000000000000000000000..535862fcd8f1cdad9a0bfb36468eb4f0e244d647 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/estimator_checks.py @@ -0,0 +1,4728 @@ +""" +The :mod:`sklearn.utils.estimator_checks` module includes various utilities to +check the compatibility of estimators with the scikit-learn API. +""" + +import pickle +import re +import warnings +from contextlib import nullcontext +from copy import deepcopy +from functools import partial, wraps +from inspect import signature +from numbers import Integral, Real + +import joblib +import numpy as np +from scipy import sparse +from scipy.stats import rankdata + +from .. 
import config_context +from ..base import ( + ClusterMixin, + RegressorMixin, + clone, + is_classifier, + is_outlier_detector, + is_regressor, +) +from ..datasets import ( + load_iris, + make_blobs, + make_classification, + make_multilabel_classification, + make_regression, +) +from ..exceptions import DataConversionWarning, NotFittedError, SkipTestWarning +from ..feature_selection import SelectFromModel, SelectKBest +from ..linear_model import ( + LinearRegression, + LogisticRegression, + RANSACRegressor, + Ridge, + SGDRegressor, +) +from ..metrics import accuracy_score, adjusted_rand_score, f1_score +from ..metrics.pairwise import linear_kernel, pairwise_distances, rbf_kernel +from ..model_selection import ShuffleSplit, train_test_split +from ..model_selection._validation import _safe_split +from ..pipeline import make_pipeline +from ..preprocessing import StandardScaler, scale +from ..random_projection import BaseRandomProjection +from ..tree import DecisionTreeClassifier, DecisionTreeRegressor +from ..utils._array_api import ( + _convert_to_numpy, + get_namespace, + yield_namespace_device_dtype_combinations, +) +from ..utils._array_api import ( + device as array_device, +) +from ..utils._param_validation import ( + InvalidParameterError, + generate_invalid_param_val, + make_constraint, +) +from ..utils.fixes import parse_version, sp_version +from ..utils.validation import check_is_fitted +from . import IS_PYPY, is_scalar_nan, shuffle +from ._param_validation import Interval +from ._tags import ( + _DEFAULT_TAGS, + _safe_tags, +) +from ._testing import ( + SkipTest, + _array_api_for_tests, + _get_args, + assert_allclose, + assert_allclose_dense_sparse, + assert_array_almost_equal, + assert_array_equal, + assert_array_less, + assert_raise_message, + create_memmap_backed_data, + ignore_warnings, + raises, + set_random_state, +) +from .validation import _num_samples, has_fit_parameter + +REGRESSION_DATASET = None +CROSS_DECOMPOSITION = ["PLSCanonical", "PLSRegression", "CCA", "PLSSVD"] + + +def _yield_checks(estimator): + name = estimator.__class__.__name__ + tags = _safe_tags(estimator) + + yield check_no_attributes_set_in_init + yield check_estimators_dtypes + yield check_fit_score_takes_y + if has_fit_parameter(estimator, "sample_weight"): + yield check_sample_weights_pandas_series + yield check_sample_weights_not_an_array + yield check_sample_weights_list + if not tags["pairwise"]: + # We skip pairwise because the data is not pairwise + yield check_sample_weights_shape + yield check_sample_weights_not_overwritten + yield partial(check_sample_weights_invariance, kind="ones") + yield partial(check_sample_weights_invariance, kind="zeros") + yield check_estimators_fit_returns_self + yield partial(check_estimators_fit_returns_self, readonly_memmap=True) + + # Check that all estimator yield informative messages when + # trained on empty datasets + if not tags["no_validation"]: + yield check_complex_data + yield check_dtype_object + yield check_estimators_empty_data_messages + + if name not in CROSS_DECOMPOSITION: + # cross-decomposition's "transform" returns X and Y + yield check_pipeline_consistency + + if not tags["allow_nan"] and not tags["no_validation"]: + # Test that all estimators check their input for NaN's and infs + yield check_estimators_nan_inf + + if tags["pairwise"]: + # Check that pairwise estimator throws error on non-square input + yield check_nonsquare_error + + yield check_estimators_overwrite_params + if hasattr(estimator, "sparsify"): + yield check_sparsify_coefficients 
+ + yield check_estimator_sparse_data + + # Test that estimators can be pickled, and once pickled + # give the same answer as before. + yield check_estimators_pickle + yield partial(check_estimators_pickle, readonly_memmap=True) + + yield check_estimator_get_tags_default_keys + + if tags["array_api_support"]: + for check in _yield_array_api_checks(estimator): + yield check + + +def _yield_classifier_checks(classifier): + tags = _safe_tags(classifier) + + # test classifiers can handle non-array data and pandas objects + yield check_classifier_data_not_an_array + # test classifiers trained on a single label always return this label + yield check_classifiers_one_label + yield check_classifiers_one_label_sample_weights + yield check_classifiers_classes + yield check_estimators_partial_fit_n_features + if tags["multioutput"]: + yield check_classifier_multioutput + # basic consistency testing + yield check_classifiers_train + yield partial(check_classifiers_train, readonly_memmap=True) + yield partial(check_classifiers_train, readonly_memmap=True, X_dtype="float32") + yield check_classifiers_regression_target + if tags["multilabel"]: + yield check_classifiers_multilabel_representation_invariance + yield check_classifiers_multilabel_output_format_predict + yield check_classifiers_multilabel_output_format_predict_proba + yield check_classifiers_multilabel_output_format_decision_function + if not tags["no_validation"]: + yield check_supervised_y_no_nan + if not tags["multioutput_only"]: + yield check_supervised_y_2d + if tags["requires_fit"]: + yield check_estimators_unfitted + if "class_weight" in classifier.get_params().keys(): + yield check_class_weight_classifiers + + yield check_non_transformer_estimators_n_iter + # test if predict_proba is a monotonic transformation of decision_function + yield check_decision_proba_consistency + + +@ignore_warnings(category=FutureWarning) +def check_supervised_y_no_nan(name, estimator_orig): + # Checks that the Estimator targets are not NaN. + estimator = clone(estimator_orig) + rng = np.random.RandomState(888) + X = rng.standard_normal(size=(10, 5)) + + for value in [np.nan, np.inf]: + y = np.full(10, value) + y = _enforce_estimator_tags_y(estimator, y) + + module_name = estimator.__module__ + if module_name.startswith("sklearn.") and not ( + "test_" in module_name or module_name.endswith("_testing") + ): + # In scikit-learn we want the error message to mention the input + # name and be specific about the kind of unexpected value. + if np.isinf(value): + match = ( + r"Input (y|Y) contains infinity or a value too large for" + r" dtype\('float64'\)." + ) + else: + match = r"Input (y|Y) contains NaN." + else: + # Do not impose a particular error message to third-party libraries. + match = None + err_msg = ( + f"Estimator {name} should have raised error on fitting array y with inf" + " value." 
+ ) + with raises(ValueError, match=match, err_msg=err_msg): + estimator.fit(X, y) + + +def _yield_regressor_checks(regressor): + tags = _safe_tags(regressor) + # TODO: test with intercept + # TODO: test with multiple responses + # basic testing + yield check_regressors_train + yield partial(check_regressors_train, readonly_memmap=True) + yield partial(check_regressors_train, readonly_memmap=True, X_dtype="float32") + yield check_regressor_data_not_an_array + yield check_estimators_partial_fit_n_features + if tags["multioutput"]: + yield check_regressor_multioutput + yield check_regressors_no_decision_function + if not tags["no_validation"] and not tags["multioutput_only"]: + yield check_supervised_y_2d + yield check_supervised_y_no_nan + name = regressor.__class__.__name__ + if name != "CCA": + # check that the regressor handles int input + yield check_regressors_int + if tags["requires_fit"]: + yield check_estimators_unfitted + yield check_non_transformer_estimators_n_iter + + +def _yield_transformer_checks(transformer): + tags = _safe_tags(transformer) + # All transformers should either deal with sparse data or raise an + # exception with type TypeError and an intelligible error message + if not tags["no_validation"]: + yield check_transformer_data_not_an_array + # these don't actually fit the data, so don't raise errors + yield check_transformer_general + if tags["preserves_dtype"]: + yield check_transformer_preserve_dtypes + yield partial(check_transformer_general, readonly_memmap=True) + if not _safe_tags(transformer, key="stateless"): + yield check_transformers_unfitted + else: + yield check_transformers_unfitted_stateless + # Dependent on external solvers and hence accessing the iter + # param is non-trivial. + external_solver = [ + "Isomap", + "KernelPCA", + "LocallyLinearEmbedding", + "RandomizedLasso", + "LogisticRegressionCV", + "BisectingKMeans", + ] + + name = transformer.__class__.__name__ + if name not in external_solver: + yield check_transformer_n_iter + + +def _yield_clustering_checks(clusterer): + yield check_clusterer_compute_labels_predict + name = clusterer.__class__.__name__ + if name not in ("WardAgglomeration", "FeatureAgglomeration"): + # this is clustering on the features + # let's not test that here. 
+ yield check_clustering + yield partial(check_clustering, readonly_memmap=True) + yield check_estimators_partial_fit_n_features + if not hasattr(clusterer, "transform"): + yield check_non_transformer_estimators_n_iter + + +def _yield_outliers_checks(estimator): + # checks for the contamination parameter + if hasattr(estimator, "contamination"): + yield check_outlier_contamination + + # checks for outlier detectors that have a fit_predict method + if hasattr(estimator, "fit_predict"): + yield check_outliers_fit_predict + + # checks for estimators that can be used on a test set + if hasattr(estimator, "predict"): + yield check_outliers_train + yield partial(check_outliers_train, readonly_memmap=True) + # test outlier detectors can handle non-array data + yield check_classifier_data_not_an_array + # test if NotFittedError is raised + if _safe_tags(estimator, key="requires_fit"): + yield check_estimators_unfitted + yield check_non_transformer_estimators_n_iter + + +def _yield_array_api_checks(estimator): + for ( + array_namespace, + device, + dtype_name, + ) in yield_namespace_device_dtype_combinations(): + yield partial( + check_array_api_input, + array_namespace=array_namespace, + dtype_name=dtype_name, + device=device, + ) + + +def _yield_all_checks(estimator): + name = estimator.__class__.__name__ + tags = _safe_tags(estimator) + if "2darray" not in tags["X_types"]: + warnings.warn( + "Can't test estimator {} which requires input of type {}".format( + name, tags["X_types"] + ), + SkipTestWarning, + ) + return + if tags["_skip_test"]: + warnings.warn( + "Explicit SKIP via _skip_test tag for estimator {}.".format(name), + SkipTestWarning, + ) + return + + for check in _yield_checks(estimator): + yield check + if is_classifier(estimator): + for check in _yield_classifier_checks(estimator): + yield check + if is_regressor(estimator): + for check in _yield_regressor_checks(estimator): + yield check + if hasattr(estimator, "transform"): + for check in _yield_transformer_checks(estimator): + yield check + if isinstance(estimator, ClusterMixin): + for check in _yield_clustering_checks(estimator): + yield check + if is_outlier_detector(estimator): + for check in _yield_outliers_checks(estimator): + yield check + yield check_parameters_default_constructible + if not tags["non_deterministic"]: + yield check_methods_sample_order_invariance + yield check_methods_subset_invariance + yield check_fit2d_1sample + yield check_fit2d_1feature + yield check_get_params_invariance + yield check_set_params + yield check_dict_unchanged + yield check_dont_overwrite_parameters + yield check_fit_idempotent + yield check_fit_check_is_fitted + if not tags["no_validation"]: + yield check_n_features_in + yield check_fit1d + yield check_fit2d_predict1d + if tags["requires_y"]: + yield check_requires_y_none + if tags["requires_positive_X"]: + yield check_fit_non_negative + + +def _get_check_estimator_ids(obj): + """Create pytest ids for checks. + + When `obj` is an estimator, this returns the pprint version of the + estimator (with `print_changed_only=True`). When `obj` is a function, the + name of the function is returned with its keyword arguments. + + `_get_check_estimator_ids` is designed to be used as the `id` in + `pytest.mark.parametrize` where `check_estimator(..., generate_only=True)` + is yielding estimators and checks. + + Parameters + ---------- + obj : estimator or function + Items generated by `check_estimator`. 
+ + Returns + ------- + id : str or None + + See Also + -------- + check_estimator + """ + if callable(obj): + if not isinstance(obj, partial): + return obj.__name__ + + if not obj.keywords: + return obj.func.__name__ + + kwstring = ",".join(["{}={}".format(k, v) for k, v in obj.keywords.items()]) + return "{}({})".format(obj.func.__name__, kwstring) + if hasattr(obj, "get_params"): + with config_context(print_changed_only=True): + return re.sub(r"\s", "", str(obj)) + + +def _construct_instance(Estimator): + """Construct Estimator instance if possible.""" + required_parameters = getattr(Estimator, "_required_parameters", []) + if len(required_parameters): + if required_parameters in (["estimator"], ["base_estimator"]): + # `RANSACRegressor` will raise an error with any model other + # than `LinearRegression` if we don't fix `min_samples` parameter. + # For common test, we can enforce using `LinearRegression` that + # is the default estimator in `RANSACRegressor` instead of `Ridge`. + if issubclass(Estimator, RANSACRegressor): + estimator = Estimator(LinearRegression()) + elif issubclass(Estimator, RegressorMixin): + estimator = Estimator(Ridge()) + elif issubclass(Estimator, SelectFromModel): + # Increases coverage because SGDRegressor has partial_fit + estimator = Estimator(SGDRegressor(random_state=0)) + else: + estimator = Estimator(LogisticRegression(C=1)) + elif required_parameters in (["estimators"],): + # Heterogeneous ensemble classes (i.e. stacking, voting) + if issubclass(Estimator, RegressorMixin): + estimator = Estimator( + estimators=[ + ("est1", DecisionTreeRegressor(max_depth=3, random_state=0)), + ("est2", DecisionTreeRegressor(max_depth=3, random_state=1)), + ] + ) + else: + estimator = Estimator( + estimators=[ + ("est1", DecisionTreeClassifier(max_depth=3, random_state=0)), + ("est2", DecisionTreeClassifier(max_depth=3, random_state=1)), + ] + ) + else: + msg = ( + f"Can't instantiate estimator {Estimator.__name__} " + f"parameters {required_parameters}" + ) + # raise additional warning to be shown by pytest + warnings.warn(msg, SkipTestWarning) + raise SkipTest(msg) + else: + estimator = Estimator() + return estimator + + +def _maybe_mark_xfail(estimator, check, pytest): + # Mark (estimator, check) pairs as XFAIL if needed (see conditions in + # _should_be_skipped_or_marked()) + # This is similar to _maybe_skip(), but this one is used by + # @parametrize_with_checks() instead of check_estimator() + + should_be_marked, reason = _should_be_skipped_or_marked(estimator, check) + if not should_be_marked: + return estimator, check + else: + return pytest.param(estimator, check, marks=pytest.mark.xfail(reason=reason)) + + +def _maybe_skip(estimator, check): + # Wrap a check so that it's skipped if needed (see conditions in + # _should_be_skipped_or_marked()) + # This is similar to _maybe_mark_xfail(), but this one is used by + # check_estimator() instead of @parametrize_with_checks which requires + # pytest + should_be_skipped, reason = _should_be_skipped_or_marked(estimator, check) + if not should_be_skipped: + return check + + check_name = check.func.__name__ if isinstance(check, partial) else check.__name__ + + @wraps(check) + def wrapped(*args, **kwargs): + raise SkipTest( + f"Skipping {check_name} for {estimator.__class__.__name__}: {reason}" + ) + + return wrapped + + +def _should_be_skipped_or_marked(estimator, check): + # Return whether a check should be skipped (when using check_estimator()) + # or marked as XFAIL (when using @parametrize_with_checks()), along with a 
+ # reason. + # Currently, a check should be skipped or marked if + # the check is in the _xfail_checks tag of the estimator + + check_name = check.func.__name__ if isinstance(check, partial) else check.__name__ + + xfail_checks = _safe_tags(estimator, key="_xfail_checks") or {} + if check_name in xfail_checks: + return True, xfail_checks[check_name] + + return False, "placeholder reason that will never be used" + + +def parametrize_with_checks(estimators): + """Pytest specific decorator for parametrizing estimator checks. + + The `id` of each check is set to be a pprint version of the estimator + and the name of the check with its keyword arguments. + This allows to use `pytest -k` to specify which tests to run:: + + pytest test_check_estimators.py -k check_estimators_fit_returns_self + + Parameters + ---------- + estimators : list of estimators instances + Estimators to generated checks for. + + .. versionchanged:: 0.24 + Passing a class was deprecated in version 0.23, and support for + classes was removed in 0.24. Pass an instance instead. + + .. versionadded:: 0.24 + + Returns + ------- + decorator : `pytest.mark.parametrize` + + See Also + -------- + check_estimator : Check if estimator adheres to scikit-learn conventions. + + Examples + -------- + >>> from sklearn.utils.estimator_checks import parametrize_with_checks + >>> from sklearn.linear_model import LogisticRegression + >>> from sklearn.tree import DecisionTreeRegressor + + >>> @parametrize_with_checks([LogisticRegression(), + ... DecisionTreeRegressor()]) + ... def test_sklearn_compatible_estimator(estimator, check): + ... check(estimator) + + """ + import pytest + + if any(isinstance(est, type) for est in estimators): + msg = ( + "Passing a class was deprecated in version 0.23 " + "and isn't supported anymore from 0.24." + "Please pass an instance instead." + ) + raise TypeError(msg) + + def checks_generator(): + for estimator in estimators: + name = type(estimator).__name__ + for check in _yield_all_checks(estimator): + check = partial(check, name) + yield _maybe_mark_xfail(estimator, check, pytest) + + return pytest.mark.parametrize( + "estimator, check", checks_generator(), ids=_get_check_estimator_ids + ) + + +def check_estimator(estimator=None, generate_only=False): + """Check if estimator adheres to scikit-learn conventions. + + This function will run an extensive test-suite for input validation, + shapes, etc, making sure that the estimator complies with `scikit-learn` + conventions as detailed in :ref:`rolling_your_own_estimator`. + Additional tests for classifiers, regressors, clustering or transformers + will be run if the Estimator class inherits from the corresponding mixin + from sklearn.base. + + Setting `generate_only=True` returns a generator that yields (estimator, + check) tuples where the check can be called independently from each + other, i.e. `check(estimator)`. This allows all checks to be run + independently and report the checks that are failing. + + scikit-learn provides a pytest specific decorator, + :func:`~sklearn.utils.estimator_checks.parametrize_with_checks`, making it + easier to test multiple estimators. + + Parameters + ---------- + estimator : estimator object + Estimator instance to check. + + .. versionadded:: 1.1 + Passing a class was deprecated in version 0.23, and support for + classes was removed in 0.24. + + generate_only : bool, default=False + When `False`, checks are evaluated when `check_estimator` is called. 
+ When `True`, `check_estimator` returns a generator that yields + (estimator, check) tuples. The check is run by calling + `check(estimator)`. + + .. versionadded:: 0.22 + + Returns + ------- + checks_generator : generator + Generator that yields (estimator, check) tuples. Returned when + `generate_only=True`. + + See Also + -------- + parametrize_with_checks : Pytest specific decorator for parametrizing estimator + checks. + + Examples + -------- + >>> from sklearn.utils.estimator_checks import check_estimator + >>> from sklearn.linear_model import LogisticRegression + >>> check_estimator(LogisticRegression(), generate_only=True) + + """ + if isinstance(estimator, type): + msg = ( + "Passing a class was deprecated in version 0.23 " + "and isn't supported anymore from 0.24." + "Please pass an instance instead." + ) + raise TypeError(msg) + + name = type(estimator).__name__ + + def checks_generator(): + for check in _yield_all_checks(estimator): + check = _maybe_skip(estimator, check) + yield estimator, partial(check, name) + + if generate_only: + return checks_generator() + + for estimator, check in checks_generator(): + try: + check(estimator) + except SkipTest as exception: + # SkipTest is thrown when pandas can't be imported, or by checks + # that are in the xfail_checks tag + warnings.warn(str(exception), SkipTestWarning) + + +def _regression_dataset(): + global REGRESSION_DATASET + if REGRESSION_DATASET is None: + X, y = make_regression( + n_samples=200, + n_features=10, + n_informative=1, + bias=5.0, + noise=20, + random_state=42, + ) + X = StandardScaler().fit_transform(X) + REGRESSION_DATASET = X, y + return REGRESSION_DATASET + + +def _set_checking_parameters(estimator): + # set parameters to speed up some estimators and + # avoid deprecated behaviour + params = estimator.get_params() + name = estimator.__class__.__name__ + if name == "TSNE": + estimator.set_params(perplexity=2) + if "n_iter" in params and name != "TSNE": + estimator.set_params(n_iter=5) + if "max_iter" in params: + if estimator.max_iter is not None: + estimator.set_params(max_iter=min(5, estimator.max_iter)) + # LinearSVR, LinearSVC + if name in ["LinearSVR", "LinearSVC"]: + estimator.set_params(max_iter=20) + # NMF + if name == "NMF": + estimator.set_params(max_iter=500) + # DictionaryLearning + if name == "DictionaryLearning": + estimator.set_params(max_iter=20, transform_algorithm="lasso_lars") + # MiniBatchNMF + if estimator.__class__.__name__ == "MiniBatchNMF": + estimator.set_params(max_iter=20, fresh_restarts=True) + # MLP + if name in ["MLPClassifier", "MLPRegressor"]: + estimator.set_params(max_iter=100) + # MiniBatchDictionaryLearning + if name == "MiniBatchDictionaryLearning": + estimator.set_params(max_iter=5) + + if "n_resampling" in params: + # randomized lasso + estimator.set_params(n_resampling=5) + if "n_estimators" in params: + estimator.set_params(n_estimators=min(5, estimator.n_estimators)) + if "max_trials" in params: + # RANSAC + estimator.set_params(max_trials=10) + if "n_init" in params: + # K-Means + estimator.set_params(n_init=2) + if "batch_size" in params and not name.startswith("MLP"): + estimator.set_params(batch_size=10) + + if name == "MeanShift": + # In the case of check_fit2d_1sample, bandwidth is set to None and + # is thus estimated. De facto it is 0.0 as a single sample is provided + # and this makes the test fails. Hence we give it a placeholder value. 
+ estimator.set_params(bandwidth=1.0) + + if name == "TruncatedSVD": + # TruncatedSVD doesn't run with n_components = n_features + # This is ugly :-/ + estimator.n_components = 1 + + if name == "LassoLarsIC": + # Noise variance estimation does not work when `n_samples < n_features`. + # We need to provide the noise variance explicitly. + estimator.set_params(noise_variance=1.0) + + if hasattr(estimator, "n_clusters"): + estimator.n_clusters = min(estimator.n_clusters, 2) + + if hasattr(estimator, "n_best"): + estimator.n_best = 1 + + if name == "SelectFdr": + # be tolerant of noisy datasets (not actually speed) + estimator.set_params(alpha=0.5) + + if name == "TheilSenRegressor": + estimator.max_subpopulation = 100 + + if isinstance(estimator, BaseRandomProjection): + # Due to the jl lemma and often very few samples, the number + # of components of the random matrix projection will be probably + # greater than the number of features. + # So we impose a smaller number (avoid "auto" mode) + estimator.set_params(n_components=2) + + if isinstance(estimator, SelectKBest): + # SelectKBest has a default of k=10 + # which is more feature than we have in most case. + estimator.set_params(k=1) + + if name in ("HistGradientBoostingClassifier", "HistGradientBoostingRegressor"): + # The default min_samples_leaf (20) isn't appropriate for small + # datasets (only very shallow trees are built) that the checks use. + estimator.set_params(min_samples_leaf=5) + + if name == "DummyClassifier": + # the default strategy prior would output constant predictions and fail + # for check_classifiers_predictions + estimator.set_params(strategy="stratified") + + # Speed-up by reducing the number of CV or splits for CV estimators + loo_cv = ["RidgeCV", "RidgeClassifierCV"] + if name not in loo_cv and hasattr(estimator, "cv"): + estimator.set_params(cv=3) + if hasattr(estimator, "n_splits"): + estimator.set_params(n_splits=3) + + if name == "OneHotEncoder": + estimator.set_params(handle_unknown="ignore") + + if name == "QuantileRegressor": + # Avoid warning due to Scipy deprecating interior-point solver + solver = "highs" if sp_version >= parse_version("1.6.0") else "interior-point" + estimator.set_params(solver=solver) + + if name in CROSS_DECOMPOSITION: + estimator.set_params(n_components=1) + + # Default "auto" parameter can lead to different ordering of eigenvalues on + # windows: #24105 + if name == "SpectralEmbedding": + estimator.set_params(eigen_tol=1e-5) + + if name == "HDBSCAN": + estimator.set_params(min_samples=1) + + +class _NotAnArray: + """An object that is convertible to an array. + + Parameters + ---------- + data : array-like + The data. + """ + + def __init__(self, data): + self.data = np.asarray(data) + + def __array__(self, dtype=None): + return self.data + + def __array_function__(self, func, types, args, kwargs): + if func.__name__ == "may_share_memory": + return True + raise TypeError("Don't want to call array_function {}!".format(func.__name__)) + + +def _is_pairwise_metric(estimator): + """Returns True if estimator accepts pairwise metric. + + Parameters + ---------- + estimator : object + Estimator object to test. + + Returns + ------- + out : bool + True if _pairwise is set to True and False otherwise. + """ + metric = getattr(estimator, "metric", None) + + return bool(metric == "precomputed") + + +def _generate_sparse_matrix(X_csr): + """Generate sparse matrices with {32,64}bit indices of diverse format. + + Parameters + ---------- + X_csr: CSR Matrix + Input matrix in CSR format. 
+ + Returns + ------- + out: iter(Matrices) + In format['dok', 'lil', 'dia', 'bsr', 'csr', 'csc', 'coo', + 'coo_64', 'csc_64', 'csr_64'] + """ + + assert X_csr.format == "csr" + yield "csr", X_csr.copy() + for sparse_format in ["dok", "lil", "dia", "bsr", "csc", "coo"]: + yield sparse_format, X_csr.asformat(sparse_format) + + # Generate large indices matrix only if its supported by scipy + X_coo = X_csr.asformat("coo") + X_coo.row = X_coo.row.astype("int64") + X_coo.col = X_coo.col.astype("int64") + yield "coo_64", X_coo + + for sparse_format in ["csc", "csr"]: + X = X_csr.asformat(sparse_format) + X.indices = X.indices.astype("int64") + X.indptr = X.indptr.astype("int64") + yield sparse_format + "_64", X + + +def check_array_api_input( + name, + estimator_orig, + array_namespace, + device=None, + dtype_name="float64", + check_values=False, +): + """Check that the estimator can work consistently with the Array API + + By default, this just checks that the types and shapes of the arrays are + consistent with calling the same estimator with numpy arrays. + + When check_values is True, it also checks that calling the estimator on the + array_api Array gives the same results as ndarrays. + """ + xp = _array_api_for_tests(array_namespace, device) + + X, y = make_classification(random_state=42) + X = X.astype(dtype_name, copy=False) + + X = _enforce_estimator_tags_X(estimator_orig, X) + y = _enforce_estimator_tags_y(estimator_orig, y) + + est = clone(estimator_orig) + + X_xp = xp.asarray(X, device=device) + y_xp = xp.asarray(y, device=device) + + est.fit(X, y) + + array_attributes = { + key: value for key, value in vars(est).items() if isinstance(value, np.ndarray) + } + + est_xp = clone(est) + with config_context(array_api_dispatch=True): + est_xp.fit(X_xp, y_xp) + input_ns = get_namespace(X_xp)[0].__name__ + + # Fitted attributes which are arrays must have the same + # namespace as the one of the training data. 
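+ # For instance (illustrative note), when X_xp is a torch tensor placed on a
+ # GPU device, ndarray-valued fitted attributes such as `coef_` are expected
+ # to come back as torch tensors on that same device.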
+ for key, attribute in array_attributes.items(): + est_xp_param = getattr(est_xp, key) + with config_context(array_api_dispatch=True): + attribute_ns = get_namespace(est_xp_param)[0].__name__ + assert attribute_ns == input_ns, ( + f"'{key}' attribute is in wrong namespace, expected {input_ns} " + f"got {attribute_ns}" + ) + + assert array_device(est_xp_param) == array_device(X_xp) + + est_xp_param_np = _convert_to_numpy(est_xp_param, xp=xp) + if check_values: + assert_allclose( + attribute, + est_xp_param_np, + err_msg=f"{key} not the same", + atol=np.finfo(X.dtype).eps * 100, + ) + else: + assert attribute.shape == est_xp_param_np.shape + assert attribute.dtype == est_xp_param_np.dtype + + # Check estimator methods, if supported, give the same results + methods = ( + "score", + "score_samples", + "decision_function", + "predict", + "predict_log_proba", + "predict_proba", + "transform", + ) + + for method_name in methods: + method = getattr(est, method_name, None) + if method is None: + continue + + if method_name == "score": + result = method(X, y) + with config_context(array_api_dispatch=True): + result_xp = getattr(est_xp, method_name)(X_xp, y_xp) + # score typically returns a Python float + assert isinstance(result, float) + assert isinstance(result_xp, float) + if check_values: + assert abs(result - result_xp) < np.finfo(X.dtype).eps * 100 + continue + else: + result = method(X) + with config_context(array_api_dispatch=True): + result_xp = getattr(est_xp, method_name)(X_xp) + + with config_context(array_api_dispatch=True): + result_ns = get_namespace(result_xp)[0].__name__ + assert result_ns == input_ns, ( + f"'{method}' output is in wrong namespace, expected {input_ns}, " + f"got {result_ns}." + ) + + assert array_device(result_xp) == array_device(X_xp) + result_xp_np = _convert_to_numpy(result_xp, xp=xp) + + if check_values: + assert_allclose( + result, + result_xp_np, + err_msg=f"{method} did not the return the same result", + atol=np.finfo(X.dtype).eps * 100, + ) + else: + if hasattr(result, "shape"): + assert result.shape == result_xp_np.shape + assert result.dtype == result_xp_np.dtype + + if method_name == "transform" and hasattr(est, "inverse_transform"): + inverse_result = est.inverse_transform(result) + with config_context(array_api_dispatch=True): + invese_result_xp = est_xp.inverse_transform(result_xp) + inverse_result_ns = get_namespace(invese_result_xp)[0].__name__ + assert inverse_result_ns == input_ns, ( + "'inverse_transform' output is in wrong namespace, expected" + f" {input_ns}, got {inverse_result_ns}." 
+ ) + + assert array_device(invese_result_xp) == array_device(X_xp) + + invese_result_xp_np = _convert_to_numpy(invese_result_xp, xp=xp) + if check_values: + assert_allclose( + inverse_result, + invese_result_xp_np, + err_msg="inverse_transform did not the return the same result", + atol=np.finfo(X.dtype).eps * 100, + ) + else: + assert inverse_result.shape == invese_result_xp_np.shape + assert inverse_result.dtype == invese_result_xp_np.dtype + + +def check_array_api_input_and_values( + name, + estimator_orig, + array_namespace, + device=None, + dtype_name="float64", +): + return check_array_api_input( + name, + estimator_orig, + array_namespace=array_namespace, + device=device, + dtype_name=dtype_name, + check_values=True, + ) + + +def check_estimator_sparse_data(name, estimator_orig): + rng = np.random.RandomState(0) + X = rng.uniform(size=(40, 3)) + X[X < 0.8] = 0 + X = _enforce_estimator_tags_X(estimator_orig, X) + X_csr = sparse.csr_matrix(X) + y = (4 * rng.uniform(size=40)).astype(int) + # catch deprecation warnings + with ignore_warnings(category=FutureWarning): + estimator = clone(estimator_orig) + y = _enforce_estimator_tags_y(estimator, y) + tags = _safe_tags(estimator_orig) + for matrix_format, X in _generate_sparse_matrix(X_csr): + # catch deprecation warnings + with ignore_warnings(category=FutureWarning): + estimator = clone(estimator_orig) + if name in ["Scaler", "StandardScaler"]: + estimator.set_params(with_mean=False) + # fit and predict + if "64" in matrix_format: + err_msg = ( + f"Estimator {name} doesn't seem to support {matrix_format} " + "matrix, and is not failing gracefully, e.g. by using " + "check_array(X, accept_large_sparse=False)" + ) + else: + err_msg = ( + f"Estimator {name} doesn't seem to fail gracefully on sparse " + "data: error message should state explicitly that sparse " + "input is not supported if this is not the case." + ) + with raises( + (TypeError, ValueError), + match=["sparse", "Sparse"], + may_pass=True, + err_msg=err_msg, + ): + with ignore_warnings(category=FutureWarning): + estimator.fit(X, y) + if hasattr(estimator, "predict"): + pred = estimator.predict(X) + if tags["multioutput_only"]: + assert pred.shape == (X.shape[0], 1) + else: + assert pred.shape == (X.shape[0],) + if hasattr(estimator, "predict_proba"): + probs = estimator.predict_proba(X) + if tags["binary_only"]: + expected_probs_shape = (X.shape[0], 2) + else: + expected_probs_shape = (X.shape[0], 4) + assert probs.shape == expected_probs_shape + + +@ignore_warnings(category=FutureWarning) +def check_sample_weights_pandas_series(name, estimator_orig): + # check that estimators will accept a 'sample_weight' parameter of + # type pandas.Series in the 'fit' function. + estimator = clone(estimator_orig) + try: + import pandas as pd + + X = np.array( + [ + [1, 1], + [1, 2], + [1, 3], + [1, 4], + [2, 1], + [2, 2], + [2, 3], + [2, 4], + [3, 1], + [3, 2], + [3, 3], + [3, 4], + ] + ) + X = pd.DataFrame(_enforce_estimator_tags_X(estimator_orig, X), copy=False) + y = pd.Series([1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 2, 2]) + weights = pd.Series([1] * 12) + if _safe_tags(estimator, key="multioutput_only"): + y = pd.DataFrame(y, copy=False) + try: + estimator.fit(X, y, sample_weight=weights) + except ValueError: + raise ValueError( + "Estimator {0} raises error if " + "'sample_weight' parameter is of " + "type pandas.Series".format(name) + ) + except ImportError: + raise SkipTest( + "pandas is not installed: not testing for " + "input of type pandas.Series to class weight." 
+ ) + + +@ignore_warnings(category=(FutureWarning)) +def check_sample_weights_not_an_array(name, estimator_orig): + # check that estimators will accept a 'sample_weight' parameter of + # type _NotAnArray in the 'fit' function. + estimator = clone(estimator_orig) + X = np.array( + [ + [1, 1], + [1, 2], + [1, 3], + [1, 4], + [2, 1], + [2, 2], + [2, 3], + [2, 4], + [3, 1], + [3, 2], + [3, 3], + [3, 4], + ] + ) + X = _NotAnArray(_enforce_estimator_tags_X(estimator_orig, X)) + y = _NotAnArray([1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 2, 2]) + weights = _NotAnArray([1] * 12) + if _safe_tags(estimator, key="multioutput_only"): + y = _NotAnArray(y.data.reshape(-1, 1)) + estimator.fit(X, y, sample_weight=weights) + + +@ignore_warnings(category=(FutureWarning)) +def check_sample_weights_list(name, estimator_orig): + # check that estimators will accept a 'sample_weight' parameter of + # type list in the 'fit' function. + estimator = clone(estimator_orig) + rnd = np.random.RandomState(0) + n_samples = 30 + X = _enforce_estimator_tags_X(estimator_orig, rnd.uniform(size=(n_samples, 3))) + y = np.arange(n_samples) % 3 + y = _enforce_estimator_tags_y(estimator, y) + sample_weight = [3] * n_samples + # Test that estimators don't raise any exception + estimator.fit(X, y, sample_weight=sample_weight) + + +@ignore_warnings(category=FutureWarning) +def check_sample_weights_shape(name, estimator_orig): + # check that estimators raise an error if sample_weight + # shape mismatches the input + estimator = clone(estimator_orig) + X = np.array( + [ + [1, 3], + [1, 3], + [1, 3], + [1, 3], + [2, 1], + [2, 1], + [2, 1], + [2, 1], + [3, 3], + [3, 3], + [3, 3], + [3, 3], + [4, 1], + [4, 1], + [4, 1], + [4, 1], + ] + ) + y = np.array([1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 2, 2, 2, 2]) + y = _enforce_estimator_tags_y(estimator, y) + + estimator.fit(X, y, sample_weight=np.ones(len(y))) + + with raises(ValueError): + estimator.fit(X, y, sample_weight=np.ones(2 * len(y))) + + with raises(ValueError): + estimator.fit(X, y, sample_weight=np.ones((len(y), 2))) + + +@ignore_warnings(category=FutureWarning) +def check_sample_weights_invariance(name, estimator_orig, kind="ones"): + # For kind="ones" check that the estimators yield same results for + # unit weights and no weights + # For kind="zeros" check that setting sample_weight to 0 is equivalent + # to removing corresponding samples. + estimator1 = clone(estimator_orig) + estimator2 = clone(estimator_orig) + set_random_state(estimator1, random_state=0) + set_random_state(estimator2, random_state=0) + + X1 = np.array( + [ + [1, 3], + [1, 3], + [1, 3], + [1, 3], + [2, 1], + [2, 1], + [2, 1], + [2, 1], + [3, 3], + [3, 3], + [3, 3], + [3, 3], + [4, 1], + [4, 1], + [4, 1], + [4, 1], + ], + dtype=np.float64, + ) + y1 = np.array([1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 2, 2, 2, 2], dtype=int) + + if kind == "ones": + X2 = X1 + y2 = y1 + sw2 = np.ones(shape=len(y1)) + err_msg = ( + f"For {name} sample_weight=None is not equivalent to sample_weight=ones" + ) + elif kind == "zeros": + # Construct a dataset that is very different to (X, y) if weights + # are disregarded, but identical to (X, y) given weights. 
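+ # Concretely: X2 stacks X1 with a shifted copy (X1 + 1) whose labels are
+ # flipped (3 - y1) and whose sample weights are all zero, so an estimator that
+ # honours sample_weight must give the same predictions as one fitted on
+ # (X1, y1) alone.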
+ X2 = np.vstack([X1, X1 + 1]) + y2 = np.hstack([y1, 3 - y1]) + sw2 = np.ones(shape=len(y1) * 2) + sw2[len(y1) :] = 0 + X2, y2, sw2 = shuffle(X2, y2, sw2, random_state=0) + + err_msg = ( + f"For {name}, a zero sample_weight is not equivalent to removing the sample" + ) + else: # pragma: no cover + raise ValueError + + y1 = _enforce_estimator_tags_y(estimator1, y1) + y2 = _enforce_estimator_tags_y(estimator2, y2) + + estimator1.fit(X1, y=y1, sample_weight=None) + estimator2.fit(X2, y=y2, sample_weight=sw2) + + for method in ["predict", "predict_proba", "decision_function", "transform"]: + if hasattr(estimator_orig, method): + X_pred1 = getattr(estimator1, method)(X1) + X_pred2 = getattr(estimator2, method)(X1) + assert_allclose_dense_sparse(X_pred1, X_pred2, err_msg=err_msg) + + +def check_sample_weights_not_overwritten(name, estimator_orig): + # check that estimators don't override the passed sample_weight parameter + estimator = clone(estimator_orig) + set_random_state(estimator, random_state=0) + + X = np.array( + [ + [1, 3], + [1, 3], + [1, 3], + [1, 3], + [2, 1], + [2, 1], + [2, 1], + [2, 1], + [3, 3], + [3, 3], + [3, 3], + [3, 3], + [4, 1], + [4, 1], + [4, 1], + [4, 1], + ], + dtype=np.float64, + ) + y = np.array([1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 2, 2, 2, 2], dtype=int) + y = _enforce_estimator_tags_y(estimator, y) + + sample_weight_original = np.ones(y.shape[0]) + sample_weight_original[0] = 10.0 + + sample_weight_fit = sample_weight_original.copy() + + estimator.fit(X, y, sample_weight=sample_weight_fit) + + err_msg = f"{name} overwrote the original `sample_weight` given during fit" + assert_allclose(sample_weight_fit, sample_weight_original, err_msg=err_msg) + + +@ignore_warnings(category=(FutureWarning, UserWarning)) +def check_dtype_object(name, estimator_orig): + # check that estimators treat dtype object as numeric if possible + rng = np.random.RandomState(0) + X = _enforce_estimator_tags_X(estimator_orig, rng.uniform(size=(40, 10))) + X = X.astype(object) + tags = _safe_tags(estimator_orig) + y = (X[:, 0] * 4).astype(int) + estimator = clone(estimator_orig) + y = _enforce_estimator_tags_y(estimator, y) + + estimator.fit(X, y) + if hasattr(estimator, "predict"): + estimator.predict(X) + + if hasattr(estimator, "transform"): + estimator.transform(X) + + with raises(Exception, match="Unknown label type", may_pass=True): + estimator.fit(X, y.astype(object)) + + if "string" not in tags["X_types"]: + X[0, 0] = {"foo": "bar"} + # This error is raised by: + # - `np.asarray` in `check_array` + # - `_unique_python` for encoders + msg = "argument must be .* string.* number" + with raises(TypeError, match=msg): + estimator.fit(X, y) + else: + # Estimators supporting string will not call np.asarray to convert the + # data to numeric and therefore, the error will not be raised. + # Checking for each element dtype in the input array will be costly. + # Refer to #11401 for full discussion. 
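+ # For estimators that declare string support, the object-dtype X (still holding
+ # only numeric values at this point) is simply expected to fit without raising.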
+ estimator.fit(X, y) + + +def check_complex_data(name, estimator_orig): + rng = np.random.RandomState(42) + # check that estimators raise an exception on providing complex data + X = rng.uniform(size=10) + 1j * rng.uniform(size=10) + X = X.reshape(-1, 1) + + # Something both valid for classification and regression + y = rng.randint(low=0, high=2, size=10) + 1j + estimator = clone(estimator_orig) + set_random_state(estimator, random_state=0) + with raises(ValueError, match="Complex data not supported"): + estimator.fit(X, y) + + +@ignore_warnings +def check_dict_unchanged(name, estimator_orig): + # this estimator raises + # ValueError: Found array with 0 feature(s) (shape=(23, 0)) + # while a minimum of 1 is required. + # error + if name in ["SpectralCoclustering"]: + return + rnd = np.random.RandomState(0) + if name in ["RANSACRegressor"]: + X = 3 * rnd.uniform(size=(20, 3)) + else: + X = 2 * rnd.uniform(size=(20, 3)) + + X = _enforce_estimator_tags_X(estimator_orig, X) + + y = X[:, 0].astype(int) + estimator = clone(estimator_orig) + y = _enforce_estimator_tags_y(estimator, y) + if hasattr(estimator, "n_components"): + estimator.n_components = 1 + + if hasattr(estimator, "n_clusters"): + estimator.n_clusters = 1 + + if hasattr(estimator, "n_best"): + estimator.n_best = 1 + + set_random_state(estimator, 1) + + estimator.fit(X, y) + for method in ["predict", "transform", "decision_function", "predict_proba"]: + if hasattr(estimator, method): + dict_before = estimator.__dict__.copy() + getattr(estimator, method)(X) + assert estimator.__dict__ == dict_before, ( + "Estimator changes __dict__ during %s" % method + ) + + +def _is_public_parameter(attr): + return not (attr.startswith("_") or attr.endswith("_")) + + +@ignore_warnings(category=FutureWarning) +def check_dont_overwrite_parameters(name, estimator_orig): + # check that fit method only changes or sets private attributes + if hasattr(estimator_orig.__init__, "deprecated_original"): + # to not check deprecated classes + return + estimator = clone(estimator_orig) + rnd = np.random.RandomState(0) + X = 3 * rnd.uniform(size=(20, 3)) + X = _enforce_estimator_tags_X(estimator_orig, X) + y = X[:, 0].astype(int) + y = _enforce_estimator_tags_y(estimator, y) + + if hasattr(estimator, "n_components"): + estimator.n_components = 1 + if hasattr(estimator, "n_clusters"): + estimator.n_clusters = 1 + + set_random_state(estimator, 1) + dict_before_fit = estimator.__dict__.copy() + estimator.fit(X, y) + + dict_after_fit = estimator.__dict__ + + public_keys_after_fit = [ + key for key in dict_after_fit.keys() if _is_public_parameter(key) + ] + + attrs_added_by_fit = [ + key for key in public_keys_after_fit if key not in dict_before_fit.keys() + ] + + # check that fit doesn't add any public attribute + assert not attrs_added_by_fit, ( + "Estimator adds public attribute(s) during" + " the fit method." + " Estimators are only allowed to add private attributes" + " either started with _ or ended" + " with _ but %s added" + % ", ".join(attrs_added_by_fit) + ) + + # check that fit doesn't change any public attribute + attrs_changed_by_fit = [ + key + for key in public_keys_after_fit + if (dict_before_fit[key] is not dict_after_fit[key]) + ] + + assert not attrs_changed_by_fit, ( + "Estimator changes public attribute(s) during" + " the fit method. 
Estimators are only allowed" + " to change attributes started" + " or ended with _, but" + " %s changed" + % ", ".join(attrs_changed_by_fit) + ) + + +@ignore_warnings(category=FutureWarning) +def check_fit2d_predict1d(name, estimator_orig): + # check by fitting a 2d array and predicting with a 1d array + rnd = np.random.RandomState(0) + X = 3 * rnd.uniform(size=(20, 3)) + X = _enforce_estimator_tags_X(estimator_orig, X) + y = X[:, 0].astype(int) + estimator = clone(estimator_orig) + y = _enforce_estimator_tags_y(estimator, y) + + if hasattr(estimator, "n_components"): + estimator.n_components = 1 + if hasattr(estimator, "n_clusters"): + estimator.n_clusters = 1 + + set_random_state(estimator, 1) + estimator.fit(X, y) + + for method in ["predict", "transform", "decision_function", "predict_proba"]: + if hasattr(estimator, method): + assert_raise_message( + ValueError, "Reshape your data", getattr(estimator, method), X[0] + ) + + +def _apply_on_subsets(func, X): + # apply function on the whole set and on mini batches + result_full = func(X) + n_features = X.shape[1] + result_by_batch = [func(batch.reshape(1, n_features)) for batch in X] + + # func can output tuple (e.g. score_samples) + if type(result_full) == tuple: + result_full = result_full[0] + result_by_batch = list(map(lambda x: x[0], result_by_batch)) + + if sparse.issparse(result_full): + result_full = result_full.toarray() + result_by_batch = [x.toarray() for x in result_by_batch] + + return np.ravel(result_full), np.ravel(result_by_batch) + + +@ignore_warnings(category=FutureWarning) +def check_methods_subset_invariance(name, estimator_orig): + # check that method gives invariant results if applied + # on mini batches or the whole set + rnd = np.random.RandomState(0) + X = 3 * rnd.uniform(size=(20, 3)) + X = _enforce_estimator_tags_X(estimator_orig, X) + y = X[:, 0].astype(int) + estimator = clone(estimator_orig) + y = _enforce_estimator_tags_y(estimator, y) + + if hasattr(estimator, "n_components"): + estimator.n_components = 1 + if hasattr(estimator, "n_clusters"): + estimator.n_clusters = 1 + + set_random_state(estimator, 1) + estimator.fit(X, y) + + for method in [ + "predict", + "transform", + "decision_function", + "score_samples", + "predict_proba", + ]: + msg = ("{method} of {name} is not invariant when applied to a subset.").format( + method=method, name=name + ) + + if hasattr(estimator, method): + result_full, result_by_batch = _apply_on_subsets( + getattr(estimator, method), X + ) + assert_allclose(result_full, result_by_batch, atol=1e-7, err_msg=msg) + + +@ignore_warnings(category=FutureWarning) +def check_methods_sample_order_invariance(name, estimator_orig): + # check that method gives invariant results if applied + # on a subset with different sample order + rnd = np.random.RandomState(0) + X = 3 * rnd.uniform(size=(20, 3)) + X = _enforce_estimator_tags_X(estimator_orig, X) + y = X[:, 0].astype(np.int64) + if _safe_tags(estimator_orig, key="binary_only"): + y[y == 2] = 1 + estimator = clone(estimator_orig) + y = _enforce_estimator_tags_y(estimator, y) + + if hasattr(estimator, "n_components"): + estimator.n_components = 1 + if hasattr(estimator, "n_clusters"): + estimator.n_clusters = 2 + + set_random_state(estimator, 1) + estimator.fit(X, y) + + idx = np.random.permutation(X.shape[0]) + + for method in [ + "predict", + "transform", + "decision_function", + "score_samples", + "predict_proba", + ]: + msg = ( + "{method} of {name} is not invariant when applied to a dataset" + "with different sample order." 
+ ).format(method=method, name=name) + + if hasattr(estimator, method): + assert_allclose_dense_sparse( + getattr(estimator, method)(X)[idx], + getattr(estimator, method)(X[idx]), + atol=1e-9, + err_msg=msg, + ) + + +@ignore_warnings +def check_fit2d_1sample(name, estimator_orig): + # Check that fitting a 2d array with only one sample either works or + # returns an informative message. The error message should either mention + # the number of samples or the number of classes. + rnd = np.random.RandomState(0) + X = 3 * rnd.uniform(size=(1, 10)) + X = _enforce_estimator_tags_X(estimator_orig, X) + + y = X[:, 0].astype(int) + estimator = clone(estimator_orig) + y = _enforce_estimator_tags_y(estimator, y) + + if hasattr(estimator, "n_components"): + estimator.n_components = 1 + if hasattr(estimator, "n_clusters"): + estimator.n_clusters = 1 + + set_random_state(estimator, 1) + + # min_cluster_size cannot be less than the data size for OPTICS. + if name == "OPTICS": + estimator.set_params(min_samples=1.0) + + # perplexity cannot be more than the number of samples for TSNE. + if name == "TSNE": + estimator.set_params(perplexity=0.5) + + msgs = [ + "1 sample", + "n_samples = 1", + "n_samples=1", + "one sample", + "1 class", + "one class", + ] + + with raises(ValueError, match=msgs, may_pass=True): + estimator.fit(X, y) + + +@ignore_warnings +def check_fit2d_1feature(name, estimator_orig): + # check fitting a 2d array with only 1 feature either works or returns + # informative message + rnd = np.random.RandomState(0) + X = 3 * rnd.uniform(size=(10, 1)) + X = _enforce_estimator_tags_X(estimator_orig, X) + y = X[:, 0].astype(int) + estimator = clone(estimator_orig) + y = _enforce_estimator_tags_y(estimator, y) + + if hasattr(estimator, "n_components"): + estimator.n_components = 1 + if hasattr(estimator, "n_clusters"): + estimator.n_clusters = 1 + # ensure two labels in subsample for RandomizedLogisticRegression + if name == "RandomizedLogisticRegression": + estimator.sample_fraction = 1 + # ensure non skipped trials for RANSACRegressor + if name == "RANSACRegressor": + estimator.residual_threshold = 0.5 + + y = _enforce_estimator_tags_y(estimator, y) + set_random_state(estimator, 1) + + msgs = [r"1 feature\(s\)", "n_features = 1", "n_features=1"] + + with raises(ValueError, match=msgs, may_pass=True): + estimator.fit(X, y) + + +@ignore_warnings +def check_fit1d(name, estimator_orig): + # check fitting 1d X array raises a ValueError + rnd = np.random.RandomState(0) + X = 3 * rnd.uniform(size=(20)) + y = X.astype(int) + estimator = clone(estimator_orig) + y = _enforce_estimator_tags_y(estimator, y) + + if hasattr(estimator, "n_components"): + estimator.n_components = 1 + if hasattr(estimator, "n_clusters"): + estimator.n_clusters = 1 + + set_random_state(estimator, 1) + with raises(ValueError): + estimator.fit(X, y) + + +@ignore_warnings(category=FutureWarning) +def check_transformer_general(name, transformer, readonly_memmap=False): + X, y = make_blobs( + n_samples=30, + centers=[[0, 0, 0], [1, 1, 1]], + random_state=0, + n_features=2, + cluster_std=0.1, + ) + X = StandardScaler().fit_transform(X) + X = _enforce_estimator_tags_X(transformer, X) + + if readonly_memmap: + X, y = create_memmap_backed_data([X, y]) + + _check_transformer(name, transformer, X, y) + + +@ignore_warnings(category=FutureWarning) +def check_transformer_data_not_an_array(name, transformer): + X, y = make_blobs( + n_samples=30, + centers=[[0, 0, 0], [1, 1, 1]], + random_state=0, + n_features=2, + cluster_std=0.1, + ) + X = 
StandardScaler().fit_transform(X) + X = _enforce_estimator_tags_X(transformer, X) + this_X = _NotAnArray(X) + this_y = _NotAnArray(np.asarray(y)) + _check_transformer(name, transformer, this_X, this_y) + # try the same with some list + _check_transformer(name, transformer, X.tolist(), y.tolist()) + + +@ignore_warnings(category=FutureWarning) +def check_transformers_unfitted(name, transformer): + X, y = _regression_dataset() + + transformer = clone(transformer) + with raises( + (AttributeError, ValueError), + err_msg=( + "The unfitted " + f"transformer {name} does not raise an error when " + "transform is called. Perhaps use " + "check_is_fitted in transform." + ), + ): + transformer.transform(X) + + +@ignore_warnings(category=FutureWarning) +def check_transformers_unfitted_stateless(name, transformer): + """Check that using transform without prior fitting + doesn't raise a NotFittedError for stateless transformers. + """ + rng = np.random.RandomState(0) + X = rng.uniform(size=(20, 5)) + X = _enforce_estimator_tags_X(transformer, X) + + transformer = clone(transformer) + X_trans = transformer.transform(X) + + assert X_trans.shape[0] == X.shape[0] + + +def _check_transformer(name, transformer_orig, X, y): + n_samples, n_features = np.asarray(X).shape + transformer = clone(transformer_orig) + set_random_state(transformer) + + # fit + + if name in CROSS_DECOMPOSITION: + y_ = np.c_[np.asarray(y), np.asarray(y)] + y_[::2, 1] *= 2 + if isinstance(X, _NotAnArray): + y_ = _NotAnArray(y_) + else: + y_ = y + + transformer.fit(X, y_) + # fit_transform method should work on non fitted estimator + transformer_clone = clone(transformer) + X_pred = transformer_clone.fit_transform(X, y=y_) + + if isinstance(X_pred, tuple): + for x_pred in X_pred: + assert x_pred.shape[0] == n_samples + else: + # check for consistent n_samples + assert X_pred.shape[0] == n_samples + + if hasattr(transformer, "transform"): + if name in CROSS_DECOMPOSITION: + X_pred2 = transformer.transform(X, y_) + X_pred3 = transformer.fit_transform(X, y=y_) + else: + X_pred2 = transformer.transform(X) + X_pred3 = transformer.fit_transform(X, y=y_) + + if _safe_tags(transformer_orig, key="non_deterministic"): + msg = name + " is non deterministic" + raise SkipTest(msg) + if isinstance(X_pred, tuple) and isinstance(X_pred2, tuple): + for x_pred, x_pred2, x_pred3 in zip(X_pred, X_pred2, X_pred3): + assert_allclose_dense_sparse( + x_pred, + x_pred2, + atol=1e-2, + err_msg="fit_transform and transform outcomes not consistent in %s" + % transformer, + ) + assert_allclose_dense_sparse( + x_pred, + x_pred3, + atol=1e-2, + err_msg="consecutive fit_transform outcomes not consistent in %s" + % transformer, + ) + else: + assert_allclose_dense_sparse( + X_pred, + X_pred2, + err_msg="fit_transform and transform outcomes not consistent in %s" + % transformer, + atol=1e-2, + ) + assert_allclose_dense_sparse( + X_pred, + X_pred3, + atol=1e-2, + err_msg="consecutive fit_transform outcomes not consistent in %s" + % transformer, + ) + assert _num_samples(X_pred2) == n_samples + assert _num_samples(X_pred3) == n_samples + + # raises error on malformed input for transform + if ( + hasattr(X, "shape") + and not _safe_tags(transformer, key="stateless") + and X.ndim == 2 + and X.shape[1] > 1 + ): + # If it's not an array, it does not have a 'T' property + with raises( + ValueError, + err_msg=( + f"The transformer {name} does not raise an error " + "when the number of features in transform is different from " + "the number of features in fit." 
+ ), + ): + transformer.transform(X[:, :-1]) + + +@ignore_warnings +def check_pipeline_consistency(name, estimator_orig): + if _safe_tags(estimator_orig, key="non_deterministic"): + msg = name + " is non deterministic" + raise SkipTest(msg) + + # check that make_pipeline(est) gives same score as est + X, y = make_blobs( + n_samples=30, + centers=[[0, 0, 0], [1, 1, 1]], + random_state=0, + n_features=2, + cluster_std=0.1, + ) + X = _enforce_estimator_tags_X(estimator_orig, X, kernel=rbf_kernel) + estimator = clone(estimator_orig) + y = _enforce_estimator_tags_y(estimator, y) + set_random_state(estimator) + pipeline = make_pipeline(estimator) + estimator.fit(X, y) + pipeline.fit(X, y) + + funcs = ["score", "fit_transform"] + + for func_name in funcs: + func = getattr(estimator, func_name, None) + if func is not None: + func_pipeline = getattr(pipeline, func_name) + result = func(X, y) + result_pipe = func_pipeline(X, y) + assert_allclose_dense_sparse(result, result_pipe) + + +@ignore_warnings +def check_fit_score_takes_y(name, estimator_orig): + # check that all estimators accept an optional y + # in fit and score so they can be used in pipelines + rnd = np.random.RandomState(0) + n_samples = 30 + X = rnd.uniform(size=(n_samples, 3)) + X = _enforce_estimator_tags_X(estimator_orig, X) + y = np.arange(n_samples) % 3 + estimator = clone(estimator_orig) + y = _enforce_estimator_tags_y(estimator, y) + set_random_state(estimator) + + funcs = ["fit", "score", "partial_fit", "fit_predict", "fit_transform"] + for func_name in funcs: + func = getattr(estimator, func_name, None) + if func is not None: + func(X, y) + args = [p.name for p in signature(func).parameters.values()] + if args[0] == "self": + # available_if makes methods into functions + # with an explicit "self", so need to shift arguments + args = args[1:] + assert args[1] in ["y", "Y"], ( + "Expected y or Y as second argument for method " + "%s of %s. Got arguments: %r." + % (func_name, type(estimator).__name__, args) + ) + + +@ignore_warnings +def check_estimators_dtypes(name, estimator_orig): + rnd = np.random.RandomState(0) + X_train_32 = 3 * rnd.uniform(size=(20, 5)).astype(np.float32) + X_train_32 = _enforce_estimator_tags_X(estimator_orig, X_train_32) + X_train_64 = X_train_32.astype(np.float64) + X_train_int_64 = X_train_32.astype(np.int64) + X_train_int_32 = X_train_32.astype(np.int32) + y = X_train_int_64[:, 0] + y = _enforce_estimator_tags_y(estimator_orig, y) + + methods = ["predict", "transform", "decision_function", "predict_proba"] + + for X_train in [X_train_32, X_train_64, X_train_int_64, X_train_int_32]: + estimator = clone(estimator_orig) + set_random_state(estimator, 1) + estimator.fit(X_train, y) + + for method in methods: + if hasattr(estimator, method): + getattr(estimator, method)(X_train) + + +def check_transformer_preserve_dtypes(name, transformer_orig): + # check that dtype are preserved meaning if input X is of some dtype + # X_transformed should be from the same dtype. 
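+ # For example, a transformer whose "preserves_dtype" tag lists "float32" must
+ # return float32 output for float32 input rather than silently upcasting to
+ # float64; the loop below exercises every dtype declared in that tag.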
+ X, y = make_blobs( + n_samples=30, + centers=[[0, 0, 0], [1, 1, 1]], + random_state=0, + cluster_std=0.1, + ) + X = StandardScaler().fit_transform(X) + X = _enforce_estimator_tags_X(transformer_orig, X) + + for dtype in _safe_tags(transformer_orig, key="preserves_dtype"): + X_cast = X.astype(dtype) + transformer = clone(transformer_orig) + set_random_state(transformer) + X_trans1 = transformer.fit_transform(X_cast, y) + X_trans2 = transformer.fit(X_cast, y).transform(X_cast) + + for Xt, method in zip([X_trans1, X_trans2], ["fit_transform", "transform"]): + if isinstance(Xt, tuple): + # cross-decompostion returns a tuple of (x_scores, y_scores) + # when given y with fit_transform; only check the first element + Xt = Xt[0] + + # check that the output dtype is preserved + assert Xt.dtype == dtype, ( + f"{name} (method={method}) does not preserve dtype. " + f"Original/Expected dtype={dtype.__name__}, got dtype={Xt.dtype}." + ) + + +@ignore_warnings(category=FutureWarning) +def check_estimators_empty_data_messages(name, estimator_orig): + e = clone(estimator_orig) + set_random_state(e, 1) + + X_zero_samples = np.empty(0).reshape(0, 3) + # The precise message can change depending on whether X or y is + # validated first. Let us test the type of exception only: + err_msg = ( + f"The estimator {name} does not raise a ValueError when an " + "empty data is used to train. Perhaps use check_array in train." + ) + with raises(ValueError, err_msg=err_msg): + e.fit(X_zero_samples, []) + + X_zero_features = np.empty(0).reshape(12, 0) + # the following y should be accepted by both classifiers and regressors + # and ignored by unsupervised models + y = _enforce_estimator_tags_y(e, np.array([1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0])) + msg = r"0 feature\(s\) \(shape=\(\d*, 0\)\) while a minimum of \d* " "is required." + with raises(ValueError, match=msg): + e.fit(X_zero_features, y) + + +@ignore_warnings(category=FutureWarning) +def check_estimators_nan_inf(name, estimator_orig): + # Checks that Estimator X's do not contain NaN or inf. + rnd = np.random.RandomState(0) + X_train_finite = _enforce_estimator_tags_X( + estimator_orig, rnd.uniform(size=(10, 3)) + ) + X_train_nan = rnd.uniform(size=(10, 3)) + X_train_nan[0, 0] = np.nan + X_train_inf = rnd.uniform(size=(10, 3)) + X_train_inf[0, 0] = np.inf + y = np.ones(10) + y[:5] = 0 + y = _enforce_estimator_tags_y(estimator_orig, y) + error_string_fit = f"Estimator {name} doesn't check for NaN and inf in fit." + error_string_predict = f"Estimator {name} doesn't check for NaN and inf in predict." + error_string_transform = ( + f"Estimator {name} doesn't check for NaN and inf in transform." 
+ ) + for X_train in [X_train_nan, X_train_inf]: + # catch deprecation warnings + with ignore_warnings(category=FutureWarning): + estimator = clone(estimator_orig) + set_random_state(estimator, 1) + # try to fit + with raises(ValueError, match=["inf", "NaN"], err_msg=error_string_fit): + estimator.fit(X_train, y) + # actually fit + estimator.fit(X_train_finite, y) + + # predict + if hasattr(estimator, "predict"): + with raises( + ValueError, + match=["inf", "NaN"], + err_msg=error_string_predict, + ): + estimator.predict(X_train) + + # transform + if hasattr(estimator, "transform"): + with raises( + ValueError, + match=["inf", "NaN"], + err_msg=error_string_transform, + ): + estimator.transform(X_train) + + +@ignore_warnings +def check_nonsquare_error(name, estimator_orig): + """Test that error is thrown when non-square data provided.""" + + X, y = make_blobs(n_samples=20, n_features=10) + estimator = clone(estimator_orig) + + with raises( + ValueError, + err_msg=( + f"The pairwise estimator {name} does not raise an error on non-square data" + ), + ): + estimator.fit(X, y) + + +@ignore_warnings +def check_estimators_pickle(name, estimator_orig, readonly_memmap=False): + """Test that we can pickle all estimators.""" + check_methods = ["predict", "transform", "decision_function", "predict_proba"] + + X, y = make_blobs( + n_samples=30, + centers=[[0, 0, 0], [1, 1, 1]], + random_state=0, + n_features=2, + cluster_std=0.1, + ) + + X = _enforce_estimator_tags_X(estimator_orig, X, kernel=rbf_kernel) + + tags = _safe_tags(estimator_orig) + # include NaN values when the estimator should deal with them + if tags["allow_nan"]: + # set randomly 10 elements to np.nan + rng = np.random.RandomState(42) + mask = rng.choice(X.size, 10, replace=False) + X.reshape(-1)[mask] = np.nan + + estimator = clone(estimator_orig) + + y = _enforce_estimator_tags_y(estimator, y) + + set_random_state(estimator) + estimator.fit(X, y) + + if readonly_memmap: + unpickled_estimator = create_memmap_backed_data(estimator) + else: + # No need to touch the file system in that case. + pickled_estimator = pickle.dumps(estimator) + module_name = estimator.__module__ + if module_name.startswith("sklearn.") and not ( + "test_" in module_name or module_name.endswith("_testing") + ): + # strict check for sklearn estimators that are not implemented in test + # modules. + assert b"_sklearn_version" in pickled_estimator + unpickled_estimator = pickle.loads(pickled_estimator) + + result = dict() + for method in check_methods: + if hasattr(estimator, method): + result[method] = getattr(estimator, method)(X) + + for method in result: + unpickled_result = getattr(unpickled_estimator, method)(X) + assert_allclose_dense_sparse(result[method], unpickled_result) + + +@ignore_warnings(category=FutureWarning) +def check_estimators_partial_fit_n_features(name, estimator_orig): + # check if number of features changes between calls to partial_fit. + if not hasattr(estimator_orig, "partial_fit"): + return + estimator = clone(estimator_orig) + X, y = make_blobs(n_samples=50, random_state=1) + X = _enforce_estimator_tags_X(estimator_orig, X) + y = _enforce_estimator_tags_y(estimator_orig, y) + + try: + if is_classifier(estimator): + classes = np.unique(y) + estimator.partial_fit(X, y, classes=classes) + else: + estimator.partial_fit(X, y) + except NotImplementedError: + return + + with raises( + ValueError, + err_msg=( + f"The estimator {name} does not raise an error when the " + "number of features changes between calls to partial_fit." 
+ ), + ): + estimator.partial_fit(X[:, :-1], y) + + +@ignore_warnings(category=FutureWarning) +def check_classifier_multioutput(name, estimator): + n_samples, n_labels, n_classes = 42, 5, 3 + tags = _safe_tags(estimator) + estimator = clone(estimator) + X, y = make_multilabel_classification( + random_state=42, n_samples=n_samples, n_labels=n_labels, n_classes=n_classes + ) + estimator.fit(X, y) + y_pred = estimator.predict(X) + + assert y_pred.shape == (n_samples, n_classes), ( + "The shape of the prediction for multioutput data is " + "incorrect. Expected {}, got {}.".format((n_samples, n_labels), y_pred.shape) + ) + assert y_pred.dtype.kind == "i" + + if hasattr(estimator, "decision_function"): + decision = estimator.decision_function(X) + assert isinstance(decision, np.ndarray) + assert decision.shape == (n_samples, n_classes), ( + "The shape of the decision function output for " + "multioutput data is incorrect. Expected {}, got {}.".format( + (n_samples, n_classes), decision.shape + ) + ) + + dec_pred = (decision > 0).astype(int) + dec_exp = estimator.classes_[dec_pred] + assert_array_equal(dec_exp, y_pred) + + if hasattr(estimator, "predict_proba"): + y_prob = estimator.predict_proba(X) + + if isinstance(y_prob, list) and not tags["poor_score"]: + for i in range(n_classes): + assert y_prob[i].shape == (n_samples, 2), ( + "The shape of the probability for multioutput data is" + " incorrect. Expected {}, got {}.".format( + (n_samples, 2), y_prob[i].shape + ) + ) + assert_array_equal( + np.argmax(y_prob[i], axis=1).astype(int), y_pred[:, i] + ) + elif not tags["poor_score"]: + assert y_prob.shape == (n_samples, n_classes), ( + "The shape of the probability for multioutput data is" + " incorrect. Expected {}, got {}.".format( + (n_samples, n_classes), y_prob.shape + ) + ) + assert_array_equal(y_prob.round().astype(int), y_pred) + + if hasattr(estimator, "decision_function") and hasattr(estimator, "predict_proba"): + for i in range(n_classes): + y_proba = estimator.predict_proba(X)[:, i] + y_decision = estimator.decision_function(X) + assert_array_equal(rankdata(y_proba), rankdata(y_decision[:, i])) + + +@ignore_warnings(category=FutureWarning) +def check_regressor_multioutput(name, estimator): + estimator = clone(estimator) + n_samples = n_features = 10 + + if not _is_pairwise_metric(estimator): + n_samples = n_samples + 1 + + X, y = make_regression( + random_state=42, n_targets=5, n_samples=n_samples, n_features=n_features + ) + X = _enforce_estimator_tags_X(estimator, X) + + estimator.fit(X, y) + y_pred = estimator.predict(X) + + assert y_pred.dtype == np.dtype("float64"), ( + "Multioutput predictions by a regressor are expected to be" + " floating-point precision. Got {} instead".format(y_pred.dtype) + ) + assert y_pred.shape == y.shape, ( + "The shape of the prediction for multioutput data is incorrect." + " Expected {}, got {}." 
+ ) + + +@ignore_warnings(category=FutureWarning) +def check_clustering(name, clusterer_orig, readonly_memmap=False): + clusterer = clone(clusterer_orig) + X, y = make_blobs(n_samples=50, random_state=1) + X, y = shuffle(X, y, random_state=7) + X = StandardScaler().fit_transform(X) + rng = np.random.RandomState(7) + X_noise = np.concatenate([X, rng.uniform(low=-3, high=3, size=(5, 2))]) + + if readonly_memmap: + X, y, X_noise = create_memmap_backed_data([X, y, X_noise]) + + n_samples, n_features = X.shape + # catch deprecation and neighbors warnings + if hasattr(clusterer, "n_clusters"): + clusterer.set_params(n_clusters=3) + set_random_state(clusterer) + if name == "AffinityPropagation": + clusterer.set_params(preference=-100) + clusterer.set_params(max_iter=100) + + # fit + clusterer.fit(X) + # with lists + clusterer.fit(X.tolist()) + + pred = clusterer.labels_ + assert pred.shape == (n_samples,) + assert adjusted_rand_score(pred, y) > 0.4 + if _safe_tags(clusterer, key="non_deterministic"): + return + set_random_state(clusterer) + with warnings.catch_warnings(record=True): + pred2 = clusterer.fit_predict(X) + assert_array_equal(pred, pred2) + + # fit_predict(X) and labels_ should be of type int + assert pred.dtype in [np.dtype("int32"), np.dtype("int64")] + assert pred2.dtype in [np.dtype("int32"), np.dtype("int64")] + + # Add noise to X to test the possible values of the labels + labels = clusterer.fit_predict(X_noise) + + # There should be at least one sample in every cluster. Equivalently + # labels_ should contain all the consecutive values between its + # min and its max. + labels_sorted = np.unique(labels) + assert_array_equal( + labels_sorted, np.arange(labels_sorted[0], labels_sorted[-1] + 1) + ) + + # Labels are expected to start at 0 (no noise) or -1 (if noise) + assert labels_sorted[0] in [0, -1] + # Labels should be less than n_clusters - 1 + if hasattr(clusterer, "n_clusters"): + n_clusters = getattr(clusterer, "n_clusters") + assert n_clusters - 1 >= labels_sorted[-1] + # else labels should be less than max(labels_) which is necessarily true + + +@ignore_warnings(category=FutureWarning) +def check_clusterer_compute_labels_predict(name, clusterer_orig): + """Check that predict is invariant of compute_labels.""" + X, y = make_blobs(n_samples=20, random_state=0) + clusterer = clone(clusterer_orig) + set_random_state(clusterer) + + if hasattr(clusterer, "compute_labels"): + # MiniBatchKMeans + X_pred1 = clusterer.fit(X).predict(X) + clusterer.set_params(compute_labels=False) + X_pred2 = clusterer.fit(X).predict(X) + assert_array_equal(X_pred1, X_pred2) + + +@ignore_warnings(category=FutureWarning) +def check_classifiers_one_label(name, classifier_orig): + error_string_fit = "Classifier can't train when only one class is present." + error_string_predict = "Classifier can't predict when only one class is present." 
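+ # A single-class target (y of all ones) is used below: the classifier may
+ # either raise a ValueError mentioning "class" at fit time, or fit successfully
+ # and then predict that lone class for every test sample.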
+ rnd = np.random.RandomState(0) + X_train = rnd.uniform(size=(10, 3)) + X_test = rnd.uniform(size=(10, 3)) + y = np.ones(10) + # catch deprecation warnings + with ignore_warnings(category=FutureWarning): + classifier = clone(classifier_orig) + with raises( + ValueError, match="class", may_pass=True, err_msg=error_string_fit + ) as cm: + classifier.fit(X_train, y) + + if cm.raised_and_matched: + # ValueError was raised with proper error message + return + + assert_array_equal(classifier.predict(X_test), y, err_msg=error_string_predict) + + +@ignore_warnings(category=FutureWarning) +def check_classifiers_one_label_sample_weights(name, classifier_orig): + """Check that classifiers accepting sample_weight fit or throws a ValueError with + an explicit message if the problem is reduced to one class. + """ + error_fit = ( + f"{name} failed when fitted on one label after sample_weight trimming. Error " + "message is not explicit, it should have 'class'." + ) + error_predict = f"{name} prediction results should only output the remaining class." + rnd = np.random.RandomState(0) + # X should be square for test on SVC with precomputed kernel + X_train = rnd.uniform(size=(10, 10)) + X_test = rnd.uniform(size=(10, 10)) + y = np.arange(10) % 2 + sample_weight = y.copy() # select a single class + classifier = clone(classifier_orig) + + if has_fit_parameter(classifier, "sample_weight"): + match = [r"\bclass(es)?\b", error_predict] + err_type, err_msg = (AssertionError, ValueError), error_fit + else: + match = r"\bsample_weight\b" + err_type, err_msg = (TypeError, ValueError), None + + with raises(err_type, match=match, may_pass=True, err_msg=err_msg) as cm: + classifier.fit(X_train, y, sample_weight=sample_weight) + if cm.raised_and_matched: + # raise the proper error type with the proper error message + return + # for estimators that do not fail, they should be able to predict the only + # class remaining during fit + assert_array_equal( + classifier.predict(X_test), np.ones(10), err_msg=error_predict + ) + + +@ignore_warnings # Warnings are raised by decision function +def check_classifiers_train( + name, classifier_orig, readonly_memmap=False, X_dtype="float64" +): + X_m, y_m = make_blobs(n_samples=300, random_state=0) + X_m = X_m.astype(X_dtype) + X_m, y_m = shuffle(X_m, y_m, random_state=7) + X_m = StandardScaler().fit_transform(X_m) + # generate binary problem from multi-class one + y_b = y_m[y_m != 2] + X_b = X_m[y_m != 2] + + if name in ["BernoulliNB", "MultinomialNB", "ComplementNB", "CategoricalNB"]: + X_m -= X_m.min() + X_b -= X_b.min() + + if readonly_memmap: + X_m, y_m, X_b, y_b = create_memmap_backed_data([X_m, y_m, X_b, y_b]) + + problems = [(X_b, y_b)] + tags = _safe_tags(classifier_orig) + if not tags["binary_only"]: + problems.append((X_m, y_m)) + + for X, y in problems: + classes = np.unique(y) + n_classes = len(classes) + n_samples, n_features = X.shape + classifier = clone(classifier_orig) + X = _enforce_estimator_tags_X(classifier, X) + y = _enforce_estimator_tags_y(classifier, y) + + set_random_state(classifier) + # raises error on malformed input for fit + if not tags["no_validation"]: + with raises( + ValueError, + err_msg=( + f"The classifier {name} does not raise an error when " + "incorrect/malformed input data for fit is passed. The number " + "of training examples is not the same as the number of " + "labels. Perhaps use check_X_y in fit." 
+ ), + ): + classifier.fit(X, y[:-1]) + + # fit + classifier.fit(X, y) + # with lists + classifier.fit(X.tolist(), y.tolist()) + assert hasattr(classifier, "classes_") + y_pred = classifier.predict(X) + + assert y_pred.shape == (n_samples,) + # training set performance + if not tags["poor_score"]: + assert accuracy_score(y, y_pred) > 0.83 + + # raises error on malformed input for predict + msg_pairwise = ( + "The classifier {} does not raise an error when shape of X in " + " {} is not equal to (n_test_samples, n_training_samples)" + ) + msg = ( + "The classifier {} does not raise an error when the number of " + "features in {} is different from the number of features in " + "fit." + ) + + if not tags["no_validation"]: + if tags["pairwise"]: + with raises( + ValueError, + err_msg=msg_pairwise.format(name, "predict"), + ): + classifier.predict(X.reshape(-1, 1)) + else: + with raises(ValueError, err_msg=msg.format(name, "predict")): + classifier.predict(X.T) + if hasattr(classifier, "decision_function"): + try: + # decision_function agrees with predict + decision = classifier.decision_function(X) + if n_classes == 2: + if not tags["multioutput_only"]: + assert decision.shape == (n_samples,) + else: + assert decision.shape == (n_samples, 1) + dec_pred = (decision.ravel() > 0).astype(int) + assert_array_equal(dec_pred, y_pred) + else: + assert decision.shape == (n_samples, n_classes) + assert_array_equal(np.argmax(decision, axis=1), y_pred) + + # raises error on malformed input for decision_function + if not tags["no_validation"]: + if tags["pairwise"]: + with raises( + ValueError, + err_msg=msg_pairwise.format(name, "decision_function"), + ): + classifier.decision_function(X.reshape(-1, 1)) + else: + with raises( + ValueError, + err_msg=msg.format(name, "decision_function"), + ): + classifier.decision_function(X.T) + except NotImplementedError: + pass + + if hasattr(classifier, "predict_proba"): + # predict_proba agrees with predict + y_prob = classifier.predict_proba(X) + assert y_prob.shape == (n_samples, n_classes) + assert_array_equal(np.argmax(y_prob, axis=1), y_pred) + # check that probas for all classes sum to one + assert_array_almost_equal(np.sum(y_prob, axis=1), np.ones(n_samples)) + if not tags["no_validation"]: + # raises error on malformed input for predict_proba + if tags["pairwise"]: + with raises( + ValueError, + err_msg=msg_pairwise.format(name, "predict_proba"), + ): + classifier.predict_proba(X.reshape(-1, 1)) + else: + with raises( + ValueError, + err_msg=msg.format(name, "predict_proba"), + ): + classifier.predict_proba(X.T) + if hasattr(classifier, "predict_log_proba"): + # predict_log_proba is a transformation of predict_proba + y_log_prob = classifier.predict_log_proba(X) + assert_allclose(y_log_prob, np.log(y_prob), 8, atol=1e-9) + assert_array_equal(np.argsort(y_log_prob), np.argsort(y_prob)) + + +def check_outlier_corruption(num_outliers, expected_outliers, decision): + # Check for deviation from the precise given contamination level that may + # be due to ties in the anomaly scores. + if num_outliers < expected_outliers: + start = num_outliers + end = expected_outliers + 1 + else: + start = expected_outliers + end = num_outliers + 1 + + # ensure that all values in the 'critical area' are tied, + # leading to the observed discrepancy between provided + # and actual contamination levels. 
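+ # Worked example: with expected_outliers=3 but num_outliers=5, the slice
+ # sorted_decision[3:6] below must contain a single unique value, i.e. the two
+ # extra predicted outliers are only excused by a tie straddling the
+ # contamination threshold.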
+ sorted_decision = np.sort(decision) + msg = ( + "The number of predicted outliers is not equal to the expected " + "number of outliers and this difference is not explained by the " + "number of ties in the decision_function values" + ) + assert len(np.unique(sorted_decision[start:end])) == 1, msg + + +def check_outliers_train(name, estimator_orig, readonly_memmap=True): + n_samples = 300 + X, _ = make_blobs(n_samples=n_samples, random_state=0) + X = shuffle(X, random_state=7) + + if readonly_memmap: + X = create_memmap_backed_data(X) + + n_samples, n_features = X.shape + estimator = clone(estimator_orig) + set_random_state(estimator) + + # fit + estimator.fit(X) + # with lists + estimator.fit(X.tolist()) + + y_pred = estimator.predict(X) + assert y_pred.shape == (n_samples,) + assert y_pred.dtype.kind == "i" + assert_array_equal(np.unique(y_pred), np.array([-1, 1])) + + decision = estimator.decision_function(X) + scores = estimator.score_samples(X) + for output in [decision, scores]: + assert output.dtype == np.dtype("float") + assert output.shape == (n_samples,) + + # raises error on malformed input for predict + with raises(ValueError): + estimator.predict(X.T) + + # decision_function agrees with predict + dec_pred = (decision >= 0).astype(int) + dec_pred[dec_pred == 0] = -1 + assert_array_equal(dec_pred, y_pred) + + # raises error on malformed input for decision_function + with raises(ValueError): + estimator.decision_function(X.T) + + # decision_function is a translation of score_samples + y_dec = scores - estimator.offset_ + assert_allclose(y_dec, decision) + + # raises error on malformed input for score_samples + with raises(ValueError): + estimator.score_samples(X.T) + + # contamination parameter (not for OneClassSVM which has the nu parameter) + if hasattr(estimator, "contamination") and not hasattr(estimator, "novelty"): + # proportion of outliers equal to contamination parameter when not + # set to 'auto'. This is true for the training set and cannot thus be + # checked as follows for estimators with a novelty parameter such as + # LocalOutlierFactor (tested in check_outliers_fit_predict) + expected_outliers = 30 + contamination = expected_outliers / n_samples + estimator.set_params(contamination=contamination) + estimator.fit(X) + y_pred = estimator.predict(X) + + num_outliers = np.sum(y_pred != 1) + # num_outliers should be equal to expected_outliers unless + # there are ties in the decision_function values. this can + # only be tested for estimators with a decision_function + # method, i.e. all estimators except LOF which is already + # excluded from this if branch. + if num_outliers != expected_outliers: + decision = estimator.decision_function(X) + check_outlier_corruption(num_outliers, expected_outliers, decision) + + +def check_outlier_contamination(name, estimator_orig): + # Check that the contamination parameter is in (0.0, 0.5] when it is an + # interval constraint. + + if not hasattr(estimator_orig, "_parameter_constraints"): + # Only estimator implementing parameter constraints will be checked + return + + if "contamination" not in estimator_orig._parameter_constraints: + return + + contamination_constraints = estimator_orig._parameter_constraints["contamination"] + if not any([isinstance(c, Interval) for c in contamination_constraints]): + raise AssertionError( + "contamination constraints should contain a Real Interval constraint." 
+ ) + + for constraint in contamination_constraints: + if isinstance(constraint, Interval): + assert ( + constraint.type == Real + and constraint.left >= 0.0 + and constraint.right <= 0.5 + and (constraint.left > 0 or constraint.closed in {"right", "neither"}) + ), "contamination constraint should be an interval in (0, 0.5]" + + +@ignore_warnings(category=FutureWarning) +def check_classifiers_multilabel_representation_invariance(name, classifier_orig): + X, y = make_multilabel_classification( + n_samples=100, + n_features=2, + n_classes=5, + n_labels=3, + length=50, + allow_unlabeled=True, + random_state=0, + ) + X = scale(X) + + X_train, y_train = X[:80], y[:80] + X_test = X[80:] + + y_train_list_of_lists = y_train.tolist() + y_train_list_of_arrays = list(y_train) + + classifier = clone(classifier_orig) + set_random_state(classifier) + + y_pred = classifier.fit(X_train, y_train).predict(X_test) + + y_pred_list_of_lists = classifier.fit(X_train, y_train_list_of_lists).predict( + X_test + ) + + y_pred_list_of_arrays = classifier.fit(X_train, y_train_list_of_arrays).predict( + X_test + ) + + assert_array_equal(y_pred, y_pred_list_of_arrays) + assert_array_equal(y_pred, y_pred_list_of_lists) + + assert y_pred.dtype == y_pred_list_of_arrays.dtype + assert y_pred.dtype == y_pred_list_of_lists.dtype + assert type(y_pred) == type(y_pred_list_of_arrays) + assert type(y_pred) == type(y_pred_list_of_lists) + + +@ignore_warnings(category=FutureWarning) +def check_classifiers_multilabel_output_format_predict(name, classifier_orig): + """Check the output of the `predict` method for classifiers supporting + multilabel-indicator targets.""" + classifier = clone(classifier_orig) + set_random_state(classifier) + + n_samples, test_size, n_outputs = 100, 25, 5 + X, y = make_multilabel_classification( + n_samples=n_samples, + n_features=2, + n_classes=n_outputs, + n_labels=3, + length=50, + allow_unlabeled=True, + random_state=0, + ) + X = scale(X) + + X_train, X_test = X[:-test_size], X[-test_size:] + y_train, y_test = y[:-test_size], y[-test_size:] + classifier.fit(X_train, y_train) + + response_method_name = "predict" + predict_method = getattr(classifier, response_method_name, None) + if predict_method is None: + raise SkipTest(f"{name} does not have a {response_method_name} method.") + + y_pred = predict_method(X_test) + + # y_pred.shape -> y_test.shape with the same dtype + assert isinstance(y_pred, np.ndarray), ( + f"{name}.predict is expected to output a NumPy array. Got " + f"{type(y_pred)} instead." + ) + assert y_pred.shape == y_test.shape, ( + f"{name}.predict outputs a NumPy array of shape {y_pred.shape} " + f"instead of {y_test.shape}." + ) + assert y_pred.dtype == y_test.dtype, ( + f"{name}.predict does not output the same dtype than the targets. " + f"Got {y_pred.dtype} instead of {y_test.dtype}." 
+ ) + + +@ignore_warnings(category=FutureWarning) +def check_classifiers_multilabel_output_format_predict_proba(name, classifier_orig): + """Check the output of the `predict_proba` method for classifiers supporting + multilabel-indicator targets.""" + classifier = clone(classifier_orig) + set_random_state(classifier) + + n_samples, test_size, n_outputs = 100, 25, 5 + X, y = make_multilabel_classification( + n_samples=n_samples, + n_features=2, + n_classes=n_outputs, + n_labels=3, + length=50, + allow_unlabeled=True, + random_state=0, + ) + X = scale(X) + + X_train, X_test = X[:-test_size], X[-test_size:] + y_train = y[:-test_size] + classifier.fit(X_train, y_train) + + response_method_name = "predict_proba" + predict_proba_method = getattr(classifier, response_method_name, None) + if predict_proba_method is None: + raise SkipTest(f"{name} does not have a {response_method_name} method.") + + y_pred = predict_proba_method(X_test) + + # y_pred.shape -> 2 possibilities: + # - list of length n_outputs of shape (n_samples, 2); + # - ndarray of shape (n_samples, n_outputs). + # dtype should be floating + if isinstance(y_pred, list): + assert len(y_pred) == n_outputs, ( + f"When {name}.predict_proba returns a list, the list should " + "be of length n_outputs and contain NumPy arrays. Got length " + f"of {len(y_pred)} instead of {n_outputs}." + ) + for pred in y_pred: + assert pred.shape == (test_size, 2), ( + f"When {name}.predict_proba returns a list, this list " + "should contain NumPy arrays of shape (n_samples, 2). Got " + f"NumPy arrays of shape {pred.shape} instead of " + f"{(test_size, 2)}." + ) + assert pred.dtype.kind == "f", ( + f"When {name}.predict_proba returns a list, it should " + "contain NumPy arrays with floating dtype. Got " + f"{pred.dtype} instead." + ) + # check that we have the correct probabilities + err_msg = ( + f"When {name}.predict_proba returns a list, each NumPy " + "array should contain probabilities for each class and " + "thus each row should sum to 1 (or close to 1 due to " + "numerical errors)." + ) + assert_allclose(pred.sum(axis=1), 1, err_msg=err_msg) + elif isinstance(y_pred, np.ndarray): + assert y_pred.shape == (test_size, n_outputs), ( + f"When {name}.predict_proba returns a NumPy array, the " + f"expected shape is (n_samples, n_outputs). Got {y_pred.shape}" + f" instead of {(test_size, n_outputs)}." + ) + assert y_pred.dtype.kind == "f", ( + f"When {name}.predict_proba returns a NumPy array, the " + f"expected data type is floating. Got {y_pred.dtype} instead." + ) + err_msg = ( + f"When {name}.predict_proba returns a NumPy array, this array " + "is expected to provide probabilities of the positive class " + "and should therefore contain values between 0 and 1." + ) + assert_array_less(0, y_pred, err_msg=err_msg) + assert_array_less(y_pred, 1, err_msg=err_msg) + else: + raise ValueError( + f"Unknown returned type {type(y_pred)} by {name}." + "predict_proba. A list or a Numpy array is expected." 
+ ) + + +@ignore_warnings(category=FutureWarning) +def check_classifiers_multilabel_output_format_decision_function(name, classifier_orig): + """Check the output of the `decision_function` method for classifiers supporting + multilabel-indicator targets.""" + classifier = clone(classifier_orig) + set_random_state(classifier) + + n_samples, test_size, n_outputs = 100, 25, 5 + X, y = make_multilabel_classification( + n_samples=n_samples, + n_features=2, + n_classes=n_outputs, + n_labels=3, + length=50, + allow_unlabeled=True, + random_state=0, + ) + X = scale(X) + + X_train, X_test = X[:-test_size], X[-test_size:] + y_train = y[:-test_size] + classifier.fit(X_train, y_train) + + response_method_name = "decision_function" + decision_function_method = getattr(classifier, response_method_name, None) + if decision_function_method is None: + raise SkipTest(f"{name} does not have a {response_method_name} method.") + + y_pred = decision_function_method(X_test) + + # y_pred.shape -> y_test.shape with floating dtype + assert isinstance(y_pred, np.ndarray), ( + f"{name}.decision_function is expected to output a NumPy array." + f" Got {type(y_pred)} instead." + ) + assert y_pred.shape == (test_size, n_outputs), ( + f"{name}.decision_function is expected to provide a NumPy array " + f"of shape (n_samples, n_outputs). Got {y_pred.shape} instead of " + f"{(test_size, n_outputs)}." + ) + assert y_pred.dtype.kind == "f", ( + f"{name}.decision_function is expected to output a floating dtype." + f" Got {y_pred.dtype} instead." + ) + + +@ignore_warnings(category=FutureWarning) +def check_get_feature_names_out_error(name, estimator_orig): + """Check the error raised by get_feature_names_out when called before fit. + + Unfitted estimators with get_feature_names_out should raise a NotFittedError. + """ + + estimator = clone(estimator_orig) + err_msg = ( + f"Estimator {name} should have raised a NotFitted error when fit is called" + " before get_feature_names_out" + ) + with raises(NotFittedError, err_msg=err_msg): + estimator.get_feature_names_out() + + +@ignore_warnings(category=FutureWarning) +def check_estimators_fit_returns_self(name, estimator_orig, readonly_memmap=False): + """Check if self is returned when calling fit.""" + X, y = make_blobs(random_state=0, n_samples=21) + X = _enforce_estimator_tags_X(estimator_orig, X) + + estimator = clone(estimator_orig) + y = _enforce_estimator_tags_y(estimator, y) + + if readonly_memmap: + X, y = create_memmap_backed_data([X, y]) + + set_random_state(estimator) + assert estimator.fit(X, y) is estimator + + +@ignore_warnings +def check_estimators_unfitted(name, estimator_orig): + """Check that predict raises an exception in an unfitted estimator. + + Unfitted estimators should raise a NotFittedError. 
+ """ + # Common test for Regressors, Classifiers and Outlier detection estimators + X, y = _regression_dataset() + + estimator = clone(estimator_orig) + for method in ( + "decision_function", + "predict", + "predict_proba", + "predict_log_proba", + ): + if hasattr(estimator, method): + with raises(NotFittedError): + getattr(estimator, method)(X) + + +@ignore_warnings(category=FutureWarning) +def check_supervised_y_2d(name, estimator_orig): + tags = _safe_tags(estimator_orig) + rnd = np.random.RandomState(0) + n_samples = 30 + X = _enforce_estimator_tags_X(estimator_orig, rnd.uniform(size=(n_samples, 3))) + y = np.arange(n_samples) % 3 + y = _enforce_estimator_tags_y(estimator_orig, y) + estimator = clone(estimator_orig) + set_random_state(estimator) + # fit + estimator.fit(X, y) + y_pred = estimator.predict(X) + + set_random_state(estimator) + # Check that when a 2D y is given, a DataConversionWarning is + # raised + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always", DataConversionWarning) + warnings.simplefilter("ignore", RuntimeWarning) + estimator.fit(X, y[:, np.newaxis]) + y_pred_2d = estimator.predict(X) + msg = "expected 1 DataConversionWarning, got: %s" % ", ".join( + [str(w_x) for w_x in w] + ) + if not tags["multioutput"]: + # check that we warned if we don't support multi-output + assert len(w) > 0, msg + assert ( + "DataConversionWarning('A column-vector y" + " was passed when a 1d array was expected" + in msg + ) + assert_allclose(y_pred.ravel(), y_pred_2d.ravel()) + + +@ignore_warnings +def check_classifiers_predictions(X, y, name, classifier_orig): + classes = np.unique(y) + classifier = clone(classifier_orig) + if name == "BernoulliNB": + X = X > X.mean() + set_random_state(classifier) + + classifier.fit(X, y) + y_pred = classifier.predict(X) + + if hasattr(classifier, "decision_function"): + decision = classifier.decision_function(X) + assert isinstance(decision, np.ndarray) + if len(classes) == 2: + dec_pred = (decision.ravel() > 0).astype(int) + dec_exp = classifier.classes_[dec_pred] + assert_array_equal( + dec_exp, + y_pred, + err_msg=( + "decision_function does not match " + "classifier for %r: expected '%s', got '%s'" + ) + % ( + classifier, + ", ".join(map(str, dec_exp)), + ", ".join(map(str, y_pred)), + ), + ) + elif getattr(classifier, "decision_function_shape", "ovr") == "ovr": + decision_y = np.argmax(decision, axis=1).astype(int) + y_exp = classifier.classes_[decision_y] + assert_array_equal( + y_exp, + y_pred, + err_msg=( + "decision_function does not match " + "classifier for %r: expected '%s', got '%s'" + ) + % ( + classifier, + ", ".join(map(str, y_exp)), + ", ".join(map(str, y_pred)), + ), + ) + + # training set performance + if name != "ComplementNB": + # This is a pathological data set for ComplementNB. + # For some specific cases 'ComplementNB' predicts less classes + # than expected + assert_array_equal(np.unique(y), np.unique(y_pred)) + assert_array_equal( + classes, + classifier.classes_, + err_msg="Unexpected classes_ attribute for %r: expected '%s', got '%s'" + % ( + classifier, + ", ".join(map(str, classes)), + ", ".join(map(str, classifier.classes_)), + ), + ) + + +def _choose_check_classifiers_labels(name, y, y_names): + # Semisupervised classifiers use -1 as the indicator for an unlabeled + # sample. 
+ return ( + y + if name in ["LabelPropagation", "LabelSpreading", "SelfTrainingClassifier"] + else y_names + ) + + +def check_classifiers_classes(name, classifier_orig): + X_multiclass, y_multiclass = make_blobs( + n_samples=30, random_state=0, cluster_std=0.1 + ) + X_multiclass, y_multiclass = shuffle(X_multiclass, y_multiclass, random_state=7) + X_multiclass = StandardScaler().fit_transform(X_multiclass) + + X_binary = X_multiclass[y_multiclass != 2] + y_binary = y_multiclass[y_multiclass != 2] + + X_multiclass = _enforce_estimator_tags_X(classifier_orig, X_multiclass) + X_binary = _enforce_estimator_tags_X(classifier_orig, X_binary) + + labels_multiclass = ["one", "two", "three"] + labels_binary = ["one", "two"] + + y_names_multiclass = np.take(labels_multiclass, y_multiclass) + y_names_binary = np.take(labels_binary, y_binary) + + problems = [(X_binary, y_binary, y_names_binary)] + if not _safe_tags(classifier_orig, key="binary_only"): + problems.append((X_multiclass, y_multiclass, y_names_multiclass)) + + for X, y, y_names in problems: + for y_names_i in [y_names, y_names.astype("O")]: + y_ = _choose_check_classifiers_labels(name, y, y_names_i) + check_classifiers_predictions(X, y_, name, classifier_orig) + + labels_binary = [-1, 1] + y_names_binary = np.take(labels_binary, y_binary) + y_binary = _choose_check_classifiers_labels(name, y_binary, y_names_binary) + check_classifiers_predictions(X_binary, y_binary, name, classifier_orig) + + +@ignore_warnings(category=FutureWarning) +def check_regressors_int(name, regressor_orig): + X, _ = _regression_dataset() + X = _enforce_estimator_tags_X(regressor_orig, X[:50]) + rnd = np.random.RandomState(0) + y = rnd.randint(3, size=X.shape[0]) + y = _enforce_estimator_tags_y(regressor_orig, y) + rnd = np.random.RandomState(0) + # separate estimators to control random seeds + regressor_1 = clone(regressor_orig) + regressor_2 = clone(regressor_orig) + set_random_state(regressor_1) + set_random_state(regressor_2) + + if name in CROSS_DECOMPOSITION: + y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))]) + y_ = y_.T + else: + y_ = y + + # fit + regressor_1.fit(X, y_) + pred1 = regressor_1.predict(X) + regressor_2.fit(X, y_.astype(float)) + pred2 = regressor_2.predict(X) + assert_allclose(pred1, pred2, atol=1e-2, err_msg=name) + + +@ignore_warnings(category=FutureWarning) +def check_regressors_train( + name, regressor_orig, readonly_memmap=False, X_dtype=np.float64 +): + X, y = _regression_dataset() + X = X.astype(X_dtype) + y = scale(y) # X is already scaled + regressor = clone(regressor_orig) + X = _enforce_estimator_tags_X(regressor, X) + y = _enforce_estimator_tags_y(regressor, y) + if name in CROSS_DECOMPOSITION: + rnd = np.random.RandomState(0) + y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))]) + y_ = y_.T + else: + y_ = y + + if readonly_memmap: + X, y, y_ = create_memmap_backed_data([X, y, y_]) + + if not hasattr(regressor, "alphas") and hasattr(regressor, "alpha"): + # linear regressors need to set alpha, but not generalized CV ones + regressor.alpha = 0.01 + if name == "PassiveAggressiveRegressor": + regressor.C = 0.01 + + # raises error on malformed input for fit + with raises( + ValueError, + err_msg=( + f"The classifier {name} does not raise an error when " + "incorrect/malformed input data for fit is passed. The number of " + "training examples is not the same as the number of labels. Perhaps " + "use check_X_y in fit." 
+ ), + ): + regressor.fit(X, y[:-1]) + # fit + set_random_state(regressor) + regressor.fit(X, y_) + regressor.fit(X.tolist(), y_.tolist()) + y_pred = regressor.predict(X) + assert y_pred.shape == y_.shape + + # TODO: find out why PLS and CCA fail. RANSAC is random + # and furthermore assumes the presence of outliers, hence + # skipped + if not _safe_tags(regressor, key="poor_score"): + assert regressor.score(X, y_) > 0.5 + + +@ignore_warnings +def check_regressors_no_decision_function(name, regressor_orig): + # check that regressors don't have a decision_function, predict_proba, or + # predict_log_proba method. + rng = np.random.RandomState(0) + regressor = clone(regressor_orig) + + X = rng.normal(size=(10, 4)) + X = _enforce_estimator_tags_X(regressor_orig, X) + y = _enforce_estimator_tags_y(regressor, X[:, 0]) + + regressor.fit(X, y) + funcs = ["decision_function", "predict_proba", "predict_log_proba"] + for func_name in funcs: + assert not hasattr(regressor, func_name) + + +@ignore_warnings(category=FutureWarning) +def check_class_weight_classifiers(name, classifier_orig): + if _safe_tags(classifier_orig, key="binary_only"): + problems = [2] + else: + problems = [2, 3] + + for n_centers in problems: + # create a very noisy dataset + X, y = make_blobs(centers=n_centers, random_state=0, cluster_std=20) + X_train, X_test, y_train, y_test = train_test_split( + X, y, test_size=0.5, random_state=0 + ) + + # can't use gram_if_pairwise() here, setting up gram matrix manually + if _safe_tags(classifier_orig, key="pairwise"): + X_test = rbf_kernel(X_test, X_train) + X_train = rbf_kernel(X_train, X_train) + + n_centers = len(np.unique(y_train)) + + if n_centers == 2: + class_weight = {0: 1000, 1: 0.0001} + else: + class_weight = {0: 1000, 1: 0.0001, 2: 0.0001} + + classifier = clone(classifier_orig).set_params(class_weight=class_weight) + if hasattr(classifier, "n_iter"): + classifier.set_params(n_iter=100) + if hasattr(classifier, "max_iter"): + classifier.set_params(max_iter=1000) + if hasattr(classifier, "min_weight_fraction_leaf"): + classifier.set_params(min_weight_fraction_leaf=0.01) + if hasattr(classifier, "n_iter_no_change"): + classifier.set_params(n_iter_no_change=20) + + set_random_state(classifier) + classifier.fit(X_train, y_train) + y_pred = classifier.predict(X_test) + # XXX: Generally can use 0.89 here. 
On Windows, LinearSVC gets + # 0.88 (Issue #9111) + if not _safe_tags(classifier_orig, key="poor_score"): + assert np.mean(y_pred == 0) > 0.87 + + +@ignore_warnings(category=FutureWarning) +def check_class_weight_balanced_classifiers( + name, classifier_orig, X_train, y_train, X_test, y_test, weights +): + classifier = clone(classifier_orig) + if hasattr(classifier, "n_iter"): + classifier.set_params(n_iter=100) + if hasattr(classifier, "max_iter"): + classifier.set_params(max_iter=1000) + + set_random_state(classifier) + classifier.fit(X_train, y_train) + y_pred = classifier.predict(X_test) + + classifier.set_params(class_weight="balanced") + classifier.fit(X_train, y_train) + y_pred_balanced = classifier.predict(X_test) + assert f1_score(y_test, y_pred_balanced, average="weighted") > f1_score( + y_test, y_pred, average="weighted" + ) + + +@ignore_warnings(category=FutureWarning) +def check_class_weight_balanced_linear_classifier(name, Classifier): + """Test class weights with non-contiguous class labels.""" + # this is run on classes, not instances, though this should be changed + X = np.array([[-1.0, -1.0], [-1.0, 0], [-0.8, -1.0], [1.0, 1.0], [1.0, 0.0]]) + y = np.array([1, 1, 1, -1, -1]) + + classifier = Classifier() + + if hasattr(classifier, "n_iter"): + # This is a very small dataset, default n_iter are likely to prevent + # convergence + classifier.set_params(n_iter=1000) + if hasattr(classifier, "max_iter"): + classifier.set_params(max_iter=1000) + if hasattr(classifier, "cv"): + classifier.set_params(cv=3) + set_random_state(classifier) + + # Let the model compute the class frequencies + classifier.set_params(class_weight="balanced") + coef_balanced = classifier.fit(X, y).coef_.copy() + + # Count each label occurrence to reweight manually + n_samples = len(y) + n_classes = float(len(np.unique(y))) + + class_weight = { + 1: n_samples / (np.sum(y == 1) * n_classes), + -1: n_samples / (np.sum(y == -1) * n_classes), + } + classifier.set_params(class_weight=class_weight) + coef_manual = classifier.fit(X, y).coef_.copy() + + assert_allclose( + coef_balanced, + coef_manual, + err_msg="Classifier %s is not computing class_weight=balanced properly." % name, + ) + + +@ignore_warnings(category=FutureWarning) +def check_estimators_overwrite_params(name, estimator_orig): + X, y = make_blobs(random_state=0, n_samples=21) + X = _enforce_estimator_tags_X(estimator_orig, X, kernel=rbf_kernel) + estimator = clone(estimator_orig) + y = _enforce_estimator_tags_y(estimator, y) + + set_random_state(estimator) + + # Make a physical copy of the original estimator parameters before fitting. + params = estimator.get_params() + original_params = deepcopy(params) + + # Fit the model + estimator.fit(X, y) + + # Compare the state of the model parameters with the original parameters + new_params = estimator.get_params() + for param_name, original_value in original_params.items(): + new_value = new_params[param_name] + + # We should never change or mutate the internal state of input + # parameters by default. To check this we use the joblib.hash function + # that introspects recursively any subobjects to compute a checksum. + # The only exception to this rule of immutable constructor parameters + # is possible RandomState instance but in this check we explicitly + # fixed the random_state params recursively to be integer seeds. + assert joblib.hash(new_value) == joblib.hash(original_value), ( + "Estimator %s should not change or mutate " + " the parameter %s from %s to %s during fit." 
+ % (name, param_name, original_value, new_value) + ) + + +@ignore_warnings(category=FutureWarning) +def check_no_attributes_set_in_init(name, estimator_orig): + """Check setting during init.""" + try: + # Clone fails if the estimator does not store + # all parameters as an attribute during init + estimator = clone(estimator_orig) + except AttributeError: + raise AttributeError( + f"Estimator {name} should store all parameters as an attribute during init." + ) + + if hasattr(type(estimator).__init__, "deprecated_original"): + return + + init_params = _get_args(type(estimator).__init__) + if IS_PYPY: + # __init__ signature has additional objects in PyPy + for key in ["obj"]: + if key in init_params: + init_params.remove(key) + parents_init_params = [ + param + for params_parent in (_get_args(parent) for parent in type(estimator).__mro__) + for param in params_parent + ] + + # Test for no setting apart from parameters during init + invalid_attr = set(vars(estimator)) - set(init_params) - set(parents_init_params) + # Ignore private attributes + invalid_attr = set([attr for attr in invalid_attr if not attr.startswith("_")]) + assert not invalid_attr, ( + "Estimator %s should not set any attribute apart" + " from parameters during init. Found attributes %s." + % (name, sorted(invalid_attr)) + ) + + +@ignore_warnings(category=FutureWarning) +def check_sparsify_coefficients(name, estimator_orig): + X = np.array( + [ + [-2, -1], + [-1, -1], + [-1, -2], + [1, 1], + [1, 2], + [2, 1], + [-1, -2], + [2, 2], + [-2, -2], + ] + ) + y = np.array([1, 1, 1, 2, 2, 2, 3, 3, 3]) + y = _enforce_estimator_tags_y(estimator_orig, y) + est = clone(estimator_orig) + + est.fit(X, y) + pred_orig = est.predict(X) + + # test sparsify with dense inputs + est.sparsify() + assert sparse.issparse(est.coef_) + pred = est.predict(X) + assert_array_equal(pred, pred_orig) + + # pickle and unpickle with sparse coef_ + est = pickle.loads(pickle.dumps(est)) + assert sparse.issparse(est.coef_) + pred = est.predict(X) + assert_array_equal(pred, pred_orig) + + +@ignore_warnings(category=FutureWarning) +def check_classifier_data_not_an_array(name, estimator_orig): + X = np.array( + [ + [3, 0], + [0, 1], + [0, 2], + [1, 1], + [1, 2], + [2, 1], + [0, 3], + [1, 0], + [2, 0], + [4, 4], + [2, 3], + [3, 2], + ] + ) + X = _enforce_estimator_tags_X(estimator_orig, X) + y = np.array([1, 1, 1, 2, 2, 2, 1, 1, 1, 2, 2, 2]) + y = _enforce_estimator_tags_y(estimator_orig, y) + for obj_type in ["NotAnArray", "PandasDataframe"]: + check_estimators_data_not_an_array(name, estimator_orig, X, y, obj_type) + + +@ignore_warnings(category=FutureWarning) +def check_regressor_data_not_an_array(name, estimator_orig): + X, y = _regression_dataset() + X = _enforce_estimator_tags_X(estimator_orig, X) + y = _enforce_estimator_tags_y(estimator_orig, y) + for obj_type in ["NotAnArray", "PandasDataframe"]: + check_estimators_data_not_an_array(name, estimator_orig, X, y, obj_type) + + +@ignore_warnings(category=FutureWarning) +def check_estimators_data_not_an_array(name, estimator_orig, X, y, obj_type): + if name in CROSS_DECOMPOSITION: + raise SkipTest( + "Skipping check_estimators_data_not_an_array " + "for cross decomposition module as estimators " + "are not deterministic." 
+ ) + # separate estimators to control random seeds + estimator_1 = clone(estimator_orig) + estimator_2 = clone(estimator_orig) + set_random_state(estimator_1) + set_random_state(estimator_2) + + if obj_type not in ["NotAnArray", "PandasDataframe"]: + raise ValueError("Data type {0} not supported".format(obj_type)) + + if obj_type == "NotAnArray": + y_ = _NotAnArray(np.asarray(y)) + X_ = _NotAnArray(np.asarray(X)) + else: + # Here pandas objects (Series and DataFrame) are tested explicitly + # because some estimators may handle them (especially their indexing) + # specially. + try: + import pandas as pd + + y_ = np.asarray(y) + if y_.ndim == 1: + y_ = pd.Series(y_, copy=False) + else: + y_ = pd.DataFrame(y_, copy=False) + X_ = pd.DataFrame(np.asarray(X), copy=False) + + except ImportError: + raise SkipTest( + "pandas is not installed: not checking estimators for pandas objects." + ) + + # fit + estimator_1.fit(X_, y_) + pred1 = estimator_1.predict(X_) + estimator_2.fit(X, y) + pred2 = estimator_2.predict(X) + assert_allclose(pred1, pred2, atol=1e-2, err_msg=name) + + +def check_parameters_default_constructible(name, Estimator): + # test default-constructibility + # get rid of deprecation warnings + + Estimator = Estimator.__class__ + + with ignore_warnings(category=FutureWarning): + estimator = _construct_instance(Estimator) + # test cloning + clone(estimator) + # test __repr__ + repr(estimator) + # test that set_params returns self + assert estimator.set_params() is estimator + + # test if init does nothing but set parameters + # this is important for grid_search etc. + # We get the default parameters from init and then + # compare these against the actual values of the attributes. + + # this comes from getattr. Gets rid of deprecation decorator. + init = getattr(estimator.__init__, "deprecated_original", estimator.__init__) + + try: + + def param_filter(p): + """Identify hyper parameters of an estimator.""" + return ( + p.name != "self" + and p.kind != p.VAR_KEYWORD + and p.kind != p.VAR_POSITIONAL + ) + + init_params = [ + p for p in signature(init).parameters.values() if param_filter(p) + ] + + except (TypeError, ValueError): + # init is not a python function. + # true for mixins + return + params = estimator.get_params() + # they can need a non-default argument + init_params = init_params[len(getattr(estimator, "_required_parameters", [])) :] + + for init_param in init_params: + assert ( + init_param.default != init_param.empty + ), "parameter %s for %s has no default value" % ( + init_param.name, + type(estimator).__name__, + ) + allowed_types = { + str, + int, + float, + bool, + tuple, + type(None), + type, + } + # Any numpy numeric such as np.int32. + allowed_types.update(np.sctypeDict.values()) + + allowed_value = ( + type(init_param.default) in allowed_types + or + # Although callables are mutable, we accept them as argument + # default value and trust that neither the implementation of + # the callable nor of the estimator changes the state of the + # callable. + callable(init_param.default) + ) + + assert allowed_value, ( + f"Parameter '{init_param.name}' of estimator " + f"'{Estimator.__name__}' is of type " + f"{type(init_param.default).__name__} which is not allowed. " + f"'{init_param.name}' must be a callable or must be of type " + f"{set(type.__name__ for type in allowed_types)}." 
+ ) + if init_param.name not in params.keys(): + # deprecated parameter, not in get_params + assert init_param.default is None, ( + f"Estimator parameter '{init_param.name}' of estimator " + f"'{Estimator.__name__}' is not returned by get_params. " + "If it is deprecated, set its default value to None." + ) + continue + + param_value = params[init_param.name] + if isinstance(param_value, np.ndarray): + assert_array_equal(param_value, init_param.default) + else: + failure_text = ( + f"Parameter {init_param.name} was mutated on init. All " + "parameters must be stored unchanged." + ) + if is_scalar_nan(param_value): + # Allows to set default parameters to np.nan + assert param_value is init_param.default, failure_text + else: + assert param_value == init_param.default, failure_text + + +def _enforce_estimator_tags_y(estimator, y): + # Estimators with a `requires_positive_y` tag only accept strictly positive + # data + if _safe_tags(estimator, key="requires_positive_y"): + # Create strictly positive y. The minimal increment above 0 is 1, as + # y could be of integer dtype. + y += 1 + abs(y.min()) + if _safe_tags(estimator, key="binary_only") and y.size > 0: + y = np.where(y == y.flat[0], y, y.flat[0] + 1) + # Estimators in mono_output_task_error raise ValueError if y is of 1-D + # Convert into a 2-D y for those estimators. + if _safe_tags(estimator, key="multioutput_only"): + return np.reshape(y, (-1, 1)) + return y + + +def _enforce_estimator_tags_X(estimator, X, kernel=linear_kernel): + # Estimators with `1darray` in `X_types` tag only accept + # X of shape (`n_samples`,) + if "1darray" in _safe_tags(estimator, key="X_types"): + X = X[:, 0] + # Estimators with a `requires_positive_X` tag only accept + # strictly positive data + if _safe_tags(estimator, key="requires_positive_X"): + X = X - X.min() + if "categorical" in _safe_tags(estimator, key="X_types"): + dtype = np.float64 if _safe_tags(estimator, key="allow_nan") else np.int32 + X = np.round((X - X.min())).astype(dtype) + + if estimator.__class__.__name__ == "SkewedChi2Sampler": + # SkewedChi2Sampler requires X > -skewdness in transform + X = X - X.min() + + # Pairwise estimators only accept + # X of shape (`n_samples`, `n_samples`) + if _is_pairwise_metric(estimator): + X = pairwise_distances(X, metric="euclidean") + elif _safe_tags(estimator, key="pairwise"): + X = kernel(X, X) + return X + + +@ignore_warnings(category=FutureWarning) +def check_non_transformer_estimators_n_iter(name, estimator_orig): + # Test that estimators that are not transformers with a parameter + # max_iter, return the attribute of n_iter_ at least 1. + + # These models are dependent on external solvers like + # libsvm and accessing the iter parameter is non-trivial. + # SelfTrainingClassifier does not perform an iteration if all samples are + # labeled, hence n_iter_ = 0 is valid. + not_run_check_n_iter = [ + "Ridge", + "RidgeClassifier", + "RandomizedLasso", + "LogisticRegressionCV", + "LinearSVC", + "LogisticRegression", + "SelfTrainingClassifier", + ] + + # Tested in test_transformer_n_iter + not_run_check_n_iter += CROSS_DECOMPOSITION + if name in not_run_check_n_iter: + return + + # LassoLars stops early for the default alpha=1.0 the iris dataset. 
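+    # Illustrative note (not in upstream scikit-learn): this check is normally
+    # reached through the public common-test entry points, e.g.
+    #
+    #     from sklearn.utils.estimator_checks import check_estimator
+    #     check_estimator(SomeIterativeEstimator())  # hypothetical estimator
+    #
+    # which, among many other checks, asserts below that a fitted estimator
+    # exposing max_iter reports n_iter_ >= 1.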
+ if name == "LassoLars": + estimator = clone(estimator_orig).set_params(alpha=0.0) + else: + estimator = clone(estimator_orig) + if hasattr(estimator, "max_iter"): + iris = load_iris() + X, y_ = iris.data, iris.target + y_ = _enforce_estimator_tags_y(estimator, y_) + + set_random_state(estimator, 0) + + X = _enforce_estimator_tags_X(estimator_orig, X) + + estimator.fit(X, y_) + + assert np.all(estimator.n_iter_ >= 1) + + +@ignore_warnings(category=FutureWarning) +def check_transformer_n_iter(name, estimator_orig): + # Test that transformers with a parameter max_iter, return the + # attribute of n_iter_ at least 1. + estimator = clone(estimator_orig) + if hasattr(estimator, "max_iter"): + if name in CROSS_DECOMPOSITION: + # Check using default data + X = [[0.0, 0.0, 1.0], [1.0, 0.0, 0.0], [2.0, 2.0, 2.0], [2.0, 5.0, 4.0]] + y_ = [[0.1, -0.2], [0.9, 1.1], [0.1, -0.5], [0.3, -0.2]] + + else: + X, y_ = make_blobs( + n_samples=30, + centers=[[0, 0, 0], [1, 1, 1]], + random_state=0, + n_features=2, + cluster_std=0.1, + ) + X = _enforce_estimator_tags_X(estimator_orig, X) + set_random_state(estimator, 0) + estimator.fit(X, y_) + + # These return a n_iter per component. + if name in CROSS_DECOMPOSITION: + for iter_ in estimator.n_iter_: + assert iter_ >= 1 + else: + assert estimator.n_iter_ >= 1 + + +@ignore_warnings(category=FutureWarning) +def check_get_params_invariance(name, estimator_orig): + # Checks if get_params(deep=False) is a subset of get_params(deep=True) + e = clone(estimator_orig) + + shallow_params = e.get_params(deep=False) + deep_params = e.get_params(deep=True) + + assert all(item in deep_params.items() for item in shallow_params.items()) + + +@ignore_warnings(category=FutureWarning) +def check_set_params(name, estimator_orig): + # Check that get_params() returns the same thing + # before and after set_params() with some fuzz + estimator = clone(estimator_orig) + + orig_params = estimator.get_params(deep=False) + msg = "get_params result does not match what was passed to set_params" + + estimator.set_params(**orig_params) + curr_params = estimator.get_params(deep=False) + assert set(orig_params.keys()) == set(curr_params.keys()), msg + for k, v in curr_params.items(): + assert orig_params[k] is v, msg + + # some fuzz values + test_values = [-np.inf, np.inf, None] + + test_params = deepcopy(orig_params) + for param_name in orig_params.keys(): + default_value = orig_params[param_name] + for value in test_values: + test_params[param_name] = value + try: + estimator.set_params(**test_params) + except (TypeError, ValueError) as e: + e_type = e.__class__.__name__ + # Exception occurred, possibly parameter validation + warnings.warn( + "{0} occurred during set_params of param {1} on " + "{2}. 
It is recommended to delay parameter " + "validation until fit.".format(e_type, param_name, name) + ) + + change_warning_msg = ( + "Estimator's parameters changed after set_params raised {}".format( + e_type + ) + ) + params_before_exception = curr_params + curr_params = estimator.get_params(deep=False) + try: + assert set(params_before_exception.keys()) == set( + curr_params.keys() + ) + for k, v in curr_params.items(): + assert params_before_exception[k] is v + except AssertionError: + warnings.warn(change_warning_msg) + else: + curr_params = estimator.get_params(deep=False) + assert set(test_params.keys()) == set(curr_params.keys()), msg + for k, v in curr_params.items(): + assert test_params[k] is v, msg + test_params[param_name] = default_value + + +@ignore_warnings(category=FutureWarning) +def check_classifiers_regression_target(name, estimator_orig): + # Check if classifier throws an exception when fed regression targets + + X, y = _regression_dataset() + + X = _enforce_estimator_tags_X(estimator_orig, X) + e = clone(estimator_orig) + msg = "Unknown label type: " + if not _safe_tags(e, key="no_validation"): + with raises(ValueError, match=msg): + e.fit(X, y) + + +@ignore_warnings(category=FutureWarning) +def check_decision_proba_consistency(name, estimator_orig): + # Check whether an estimator having both decision_function and + # predict_proba methods has outputs with perfect rank correlation. + + centers = [(2, 2), (4, 4)] + X, y = make_blobs( + n_samples=100, + random_state=0, + n_features=4, + centers=centers, + cluster_std=1.0, + shuffle=True, + ) + X_train, X_test, y_train, y_test = train_test_split( + X, y, test_size=0.2, random_state=0 + ) + estimator = clone(estimator_orig) + + if hasattr(estimator, "decision_function") and hasattr(estimator, "predict_proba"): + estimator.fit(X_train, y_train) + # Since the link function from decision_function() to predict_proba() + # is sometimes not precise enough (typically expit), we round to the + # 10th decimal to avoid numerical issues: we compare the rank + # with deterministic ties rather than get platform specific rank + # inversions in case of machine level differences. + a = estimator.predict_proba(X_test)[:, 1].round(decimals=10) + b = estimator.decision_function(X_test).round(decimals=10) + + rank_proba, rank_score = rankdata(a), rankdata(b) + try: + assert_array_almost_equal(rank_proba, rank_score) + except AssertionError: + # Sometimes, the rounding applied on the probabilities will have + # ties that are not present in the scores because it is + # numerically more precise. In this case, we relax the test by + # grouping the decision function scores based on the probability + # rank and check that the score is monotonically increasing. + grouped_y_score = np.array( + [b[rank_proba == group].mean() for group in np.unique(rank_proba)] + ) + sorted_idx = np.argsort(grouped_y_score) + assert_array_equal(sorted_idx, np.arange(len(sorted_idx))) + + +def check_outliers_fit_predict(name, estimator_orig): + # Check fit_predict for outlier detectors. + + n_samples = 300 + X, _ = make_blobs(n_samples=n_samples, random_state=0) + X = shuffle(X, random_state=7) + n_samples, n_features = X.shape + estimator = clone(estimator_orig) + + set_random_state(estimator) + + y_pred = estimator.fit_predict(X) + assert y_pred.shape == (n_samples,) + assert y_pred.dtype.kind == "i" + assert_array_equal(np.unique(y_pred), np.array([-1, 1])) + + # check fit_predict = fit.predict when the estimator has both a predict and + # a fit_predict method. 
recall that it is already assumed here that the + # estimator has a fit_predict method + if hasattr(estimator, "predict"): + y_pred_2 = estimator.fit(X).predict(X) + assert_array_equal(y_pred, y_pred_2) + + if hasattr(estimator, "contamination"): + # proportion of outliers equal to contamination parameter when not + # set to 'auto' + expected_outliers = 30 + contamination = float(expected_outliers) / n_samples + estimator.set_params(contamination=contamination) + y_pred = estimator.fit_predict(X) + + num_outliers = np.sum(y_pred != 1) + # num_outliers should be equal to expected_outliers unless + # there are ties in the decision_function values. this can + # only be tested for estimators with a decision_function + # method + if num_outliers != expected_outliers and hasattr( + estimator, "decision_function" + ): + decision = estimator.decision_function(X) + check_outlier_corruption(num_outliers, expected_outliers, decision) + + +def check_fit_non_negative(name, estimator_orig): + # Check that proper warning is raised for non-negative X + # when tag requires_positive_X is present + X = np.array([[-1.0, 1], [-1.0, 1]]) + y = np.array([1, 2]) + estimator = clone(estimator_orig) + with raises(ValueError): + estimator.fit(X, y) + + +def check_fit_idempotent(name, estimator_orig): + # Check that est.fit(X) is the same as est.fit(X).fit(X). Ideally we would + # check that the estimated parameters during training (e.g. coefs_) are + # the same, but having a universal comparison function for those + # attributes is difficult and full of edge cases. So instead we check that + # predict(), predict_proba(), decision_function() and transform() return + # the same results. + + check_methods = ["predict", "transform", "decision_function", "predict_proba"] + rng = np.random.RandomState(0) + + estimator = clone(estimator_orig) + set_random_state(estimator) + if "warm_start" in estimator.get_params().keys(): + estimator.set_params(warm_start=False) + + n_samples = 100 + X = rng.normal(loc=100, size=(n_samples, 2)) + X = _enforce_estimator_tags_X(estimator, X) + if is_regressor(estimator_orig): + y = rng.normal(size=n_samples) + else: + y = rng.randint(low=0, high=2, size=n_samples) + y = _enforce_estimator_tags_y(estimator, y) + + train, test = next(ShuffleSplit(test_size=0.2, random_state=rng).split(X)) + X_train, y_train = _safe_split(estimator, X, y, train) + X_test, y_test = _safe_split(estimator, X, y, test, train) + + # Fit for the first time + estimator.fit(X_train, y_train) + + result = { + method: getattr(estimator, method)(X_test) + for method in check_methods + if hasattr(estimator, method) + } + + # Fit again + set_random_state(estimator) + estimator.fit(X_train, y_train) + + for method in check_methods: + if hasattr(estimator, method): + new_result = getattr(estimator, method)(X_test) + if np.issubdtype(new_result.dtype, np.floating): + tol = 2 * np.finfo(new_result.dtype).eps + else: + tol = 2 * np.finfo(np.float64).eps + assert_allclose_dense_sparse( + result[method], + new_result, + atol=max(tol, 1e-9), + rtol=max(tol, 1e-7), + err_msg="Idempotency check failed for method {}".format(method), + ) + + +def check_fit_check_is_fitted(name, estimator_orig): + # Make sure that estimator doesn't pass check_is_fitted before calling fit + # and that passes check_is_fitted once it's fit. 
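+    # Illustrative sketch (not in upstream scikit-learn) of the contract
+    # enforced below, assuming a hypothetical MyEstimator; stateless
+    # transformers are exempted by the tag check further down:
+    #
+    #     est = MyEstimator()
+    #     check_is_fitted(est)   # must raise NotFittedError before fit
+    #     est.fit(X, y)
+    #     check_is_fitted(est)   # must pass silently after fit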
+ + rng = np.random.RandomState(42) + + estimator = clone(estimator_orig) + set_random_state(estimator) + if "warm_start" in estimator.get_params(): + estimator.set_params(warm_start=False) + + n_samples = 100 + X = rng.normal(loc=100, size=(n_samples, 2)) + X = _enforce_estimator_tags_X(estimator, X) + if is_regressor(estimator_orig): + y = rng.normal(size=n_samples) + else: + y = rng.randint(low=0, high=2, size=n_samples) + y = _enforce_estimator_tags_y(estimator, y) + + if not _safe_tags(estimator).get("stateless", False): + # stateless estimators (such as FunctionTransformer) are always "fit"! + try: + check_is_fitted(estimator) + raise AssertionError( + f"{estimator.__class__.__name__} passes check_is_fitted before being" + " fit!" + ) + except NotFittedError: + pass + estimator.fit(X, y) + try: + check_is_fitted(estimator) + except NotFittedError as e: + raise NotFittedError( + "Estimator fails to pass `check_is_fitted` even though it has been fit." + ) from e + + +def check_n_features_in(name, estimator_orig): + # Make sure that n_features_in_ attribute doesn't exist until fit is + # called, and that its value is correct. + + rng = np.random.RandomState(0) + + estimator = clone(estimator_orig) + set_random_state(estimator) + if "warm_start" in estimator.get_params(): + estimator.set_params(warm_start=False) + + n_samples = 100 + X = rng.normal(loc=100, size=(n_samples, 2)) + X = _enforce_estimator_tags_X(estimator, X) + if is_regressor(estimator_orig): + y = rng.normal(size=n_samples) + else: + y = rng.randint(low=0, high=2, size=n_samples) + y = _enforce_estimator_tags_y(estimator, y) + + assert not hasattr(estimator, "n_features_in_") + estimator.fit(X, y) + assert hasattr(estimator, "n_features_in_") + assert estimator.n_features_in_ == X.shape[1] + + +def check_requires_y_none(name, estimator_orig): + # Make sure that an estimator with requires_y=True fails gracefully when + # given y=None + + rng = np.random.RandomState(0) + + estimator = clone(estimator_orig) + set_random_state(estimator) + + n_samples = 100 + X = rng.normal(loc=100, size=(n_samples, 2)) + X = _enforce_estimator_tags_X(estimator, X) + + expected_err_msgs = ( + "requires y to be passed, but the target y is None", + "Expected array-like (array or non-string sequence), got None", + "y should be a 1d array", + ) + + try: + estimator.fit(X, None) + except ValueError as ve: + if not any(msg in str(ve) for msg in expected_err_msgs): + raise ve + + +@ignore_warnings(category=FutureWarning) +def check_n_features_in_after_fitting(name, estimator_orig): + # Make sure that n_features_in are checked after fitting + tags = _safe_tags(estimator_orig) + + is_supported_X_types = ( + "2darray" in tags["X_types"] or "categorical" in tags["X_types"] + ) + + if not is_supported_X_types or tags["no_validation"]: + return + + rng = np.random.RandomState(0) + + estimator = clone(estimator_orig) + set_random_state(estimator) + if "warm_start" in estimator.get_params(): + estimator.set_params(warm_start=False) + + n_samples = 150 + X = rng.normal(size=(n_samples, 8)) + X = _enforce_estimator_tags_X(estimator, X) + + if is_regressor(estimator): + y = rng.normal(size=n_samples) + else: + y = rng.randint(low=0, high=2, size=n_samples) + y = _enforce_estimator_tags_y(estimator, y) + + estimator.fit(X, y) + assert estimator.n_features_in_ == X.shape[1] + + # check methods will check n_features_in_ + check_methods = [ + "predict", + "transform", + "decision_function", + "predict_proba", + "score", + ] + X_bad = X[:, [1]] + + msg = f"X has 1 
features, but \\w+ is expecting {X.shape[1]} features as input" + for method in check_methods: + if not hasattr(estimator, method): + continue + + callable_method = getattr(estimator, method) + if method == "score": + callable_method = partial(callable_method, y=y) + + with raises(ValueError, match=msg): + callable_method(X_bad) + + # partial_fit will check in the second call + if not hasattr(estimator, "partial_fit"): + return + + estimator = clone(estimator_orig) + if is_classifier(estimator): + estimator.partial_fit(X, y, classes=np.unique(y)) + else: + estimator.partial_fit(X, y) + assert estimator.n_features_in_ == X.shape[1] + + with raises(ValueError, match=msg): + estimator.partial_fit(X_bad, y) + + +def check_estimator_get_tags_default_keys(name, estimator_orig): + # check that if _get_tags is implemented, it contains all keys from + # _DEFAULT_KEYS + estimator = clone(estimator_orig) + if not hasattr(estimator, "_get_tags"): + return + + tags_keys = set(estimator._get_tags().keys()) + default_tags_keys = set(_DEFAULT_TAGS.keys()) + assert tags_keys.intersection(default_tags_keys) == default_tags_keys, ( + f"{name}._get_tags() is missing entries for the following default tags" + f": {default_tags_keys - tags_keys.intersection(default_tags_keys)}" + ) + + +def check_dataframe_column_names_consistency(name, estimator_orig): + try: + import pandas as pd + except ImportError: + raise SkipTest( + "pandas is not installed: not checking column name consistency for pandas" + ) + + tags = _safe_tags(estimator_orig) + is_supported_X_types = ( + "2darray" in tags["X_types"] or "categorical" in tags["X_types"] + ) + + if not is_supported_X_types or tags["no_validation"]: + return + + rng = np.random.RandomState(0) + + estimator = clone(estimator_orig) + set_random_state(estimator) + + X_orig = rng.normal(size=(150, 8)) + + X_orig = _enforce_estimator_tags_X(estimator, X_orig) + n_samples, n_features = X_orig.shape + + names = np.array([f"col_{i}" for i in range(n_features)]) + X = pd.DataFrame(X_orig, columns=names, copy=False) + + if is_regressor(estimator): + y = rng.normal(size=n_samples) + else: + y = rng.randint(low=0, high=2, size=n_samples) + y = _enforce_estimator_tags_y(estimator, y) + + # Check that calling `fit` does not raise any warnings about feature names. 
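+    # Illustrative note (not in upstream scikit-learn): the behaviour exercised
+    # below is the feature-name tracking of fitted estimators, e.g.
+    #
+    #     df = pd.DataFrame(X, columns=["col_0", "col_1"])
+    #     est.fit(df, y)
+    #     est.feature_names_in_   # array(["col_0", "col_1"], dtype=object)
+    #
+    # where est stands for any hypothetical estimator accepting 2D input.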
+ with warnings.catch_warnings(): + warnings.filterwarnings( + "error", + message="X does not have valid feature names", + category=UserWarning, + module="sklearn", + ) + estimator.fit(X, y) + + if not hasattr(estimator, "feature_names_in_"): + raise ValueError( + "Estimator does not have a feature_names_in_ " + "attribute after fitting with a dataframe" + ) + assert isinstance(estimator.feature_names_in_, np.ndarray) + assert estimator.feature_names_in_.dtype == object + assert_array_equal(estimator.feature_names_in_, names) + + # Only check sklearn estimators for feature_names_in_ in docstring + module_name = estimator_orig.__module__ + if ( + module_name.startswith("sklearn.") + and not ("test_" in module_name or module_name.endswith("_testing")) + and ("feature_names_in_" not in (estimator_orig.__doc__)) + ): + raise ValueError( + f"Estimator {name} does not document its feature_names_in_ attribute" + ) + + check_methods = [] + for method in ( + "predict", + "transform", + "decision_function", + "predict_proba", + "score", + "score_samples", + "predict_log_proba", + ): + if not hasattr(estimator, method): + continue + + callable_method = getattr(estimator, method) + if method == "score": + callable_method = partial(callable_method, y=y) + check_methods.append((method, callable_method)) + + for _, method in check_methods: + with warnings.catch_warnings(): + warnings.filterwarnings( + "error", + message="X does not have valid feature names", + category=UserWarning, + module="sklearn", + ) + method(X) # works without UserWarning for valid features + + invalid_names = [ + (names[::-1], "Feature names must be in the same order as they were in fit."), + ( + [f"another_prefix_{i}" for i in range(n_features)], + ( + "Feature names unseen at fit time:\n- another_prefix_0\n-" + " another_prefix_1\n" + ), + ), + ( + names[:3], + f"Feature names seen at fit time, yet now missing:\n- {min(names[3:])}\n", + ), + ] + params = { + key: value + for key, value in estimator.get_params().items() + if "early_stopping" in key + } + early_stopping_enabled = any(value is True for value in params.values()) + + for invalid_name, additional_message in invalid_names: + X_bad = pd.DataFrame(X, columns=invalid_name, copy=False) + + expected_msg = re.escape( + "The feature names should match those that were passed during fit.\n" + f"{additional_message}" + ) + for name, method in check_methods: + with raises( + ValueError, match=expected_msg, err_msg=f"{name} did not raise" + ): + method(X_bad) + + # partial_fit checks on second call + # Do not call partial fit if early_stopping is on + if not hasattr(estimator, "partial_fit") or early_stopping_enabled: + continue + + estimator = clone(estimator_orig) + if is_classifier(estimator): + classes = np.unique(y) + estimator.partial_fit(X, y, classes=classes) + else: + estimator.partial_fit(X, y) + + with raises(ValueError, match=expected_msg): + estimator.partial_fit(X_bad, y) + + +def check_transformer_get_feature_names_out(name, transformer_orig): + tags = transformer_orig._get_tags() + if "2darray" not in tags["X_types"] or tags["no_validation"]: + return + + X, y = make_blobs( + n_samples=30, + centers=[[0, 0, 0], [1, 1, 1]], + random_state=0, + n_features=2, + cluster_std=0.1, + ) + X = StandardScaler().fit_transform(X) + + transformer = clone(transformer_orig) + X = _enforce_estimator_tags_X(transformer, X) + + n_features = X.shape[1] + set_random_state(transformer) + + y_ = y + if name in CROSS_DECOMPOSITION: + y_ = np.c_[np.asarray(y), np.asarray(y)] + y_[::2, 1] 
*= 2 + + X_transform = transformer.fit_transform(X, y=y_) + input_features = [f"feature{i}" for i in range(n_features)] + + # input_features names is not the same length as n_features_in_ + with raises(ValueError, match="input_features should have length equal"): + transformer.get_feature_names_out(input_features[::2]) + + feature_names_out = transformer.get_feature_names_out(input_features) + assert feature_names_out is not None + assert isinstance(feature_names_out, np.ndarray) + assert feature_names_out.dtype == object + assert all(isinstance(name, str) for name in feature_names_out) + + if isinstance(X_transform, tuple): + n_features_out = X_transform[0].shape[1] + else: + n_features_out = X_transform.shape[1] + + assert ( + len(feature_names_out) == n_features_out + ), f"Expected {n_features_out} feature names, got {len(feature_names_out)}" + + +def check_transformer_get_feature_names_out_pandas(name, transformer_orig): + try: + import pandas as pd + except ImportError: + raise SkipTest( + "pandas is not installed: not checking column name consistency for pandas" + ) + + tags = transformer_orig._get_tags() + if "2darray" not in tags["X_types"] or tags["no_validation"]: + return + + X, y = make_blobs( + n_samples=30, + centers=[[0, 0, 0], [1, 1, 1]], + random_state=0, + n_features=2, + cluster_std=0.1, + ) + X = StandardScaler().fit_transform(X) + + transformer = clone(transformer_orig) + X = _enforce_estimator_tags_X(transformer, X) + + n_features = X.shape[1] + set_random_state(transformer) + + y_ = y + if name in CROSS_DECOMPOSITION: + y_ = np.c_[np.asarray(y), np.asarray(y)] + y_[::2, 1] *= 2 + + feature_names_in = [f"col{i}" for i in range(n_features)] + df = pd.DataFrame(X, columns=feature_names_in, copy=False) + X_transform = transformer.fit_transform(df, y=y_) + + # error is raised when `input_features` do not match feature_names_in + invalid_feature_names = [f"bad{i}" for i in range(n_features)] + with raises(ValueError, match="input_features is not equal to feature_names_in_"): + transformer.get_feature_names_out(invalid_feature_names) + + feature_names_out_default = transformer.get_feature_names_out() + feature_names_in_explicit_names = transformer.get_feature_names_out( + feature_names_in + ) + assert_array_equal(feature_names_out_default, feature_names_in_explicit_names) + + if isinstance(X_transform, tuple): + n_features_out = X_transform[0].shape[1] + else: + n_features_out = X_transform.shape[1] + + assert ( + len(feature_names_out_default) == n_features_out + ), f"Expected {n_features_out} feature names, got {len(feature_names_out_default)}" + + +def check_param_validation(name, estimator_orig): + # Check that an informative error is raised when the value of a constructor + # parameter does not have an appropriate type or value. + rng = np.random.RandomState(0) + X = rng.uniform(size=(20, 5)) + y = rng.randint(0, 2, size=20) + y = _enforce_estimator_tags_y(estimator_orig, y) + + estimator_params = estimator_orig.get_params(deep=False).keys() + + # check that there is a constraint for each parameter + if estimator_params: + validation_params = estimator_orig._parameter_constraints.keys() + unexpected_params = set(validation_params) - set(estimator_params) + missing_params = set(estimator_params) - set(validation_params) + err_msg = ( + f"Mismatch between _parameter_constraints and the parameters of {name}." 
+ f"\nConsider the unexpected parameters {unexpected_params} and expected but" + f" missing parameters {missing_params}" + ) + assert validation_params == estimator_params, err_msg + + # this object does not have a valid type for sure for all params + param_with_bad_type = type("BadType", (), {})() + + fit_methods = ["fit", "partial_fit", "fit_transform", "fit_predict"] + + for param_name in estimator_params: + constraints = estimator_orig._parameter_constraints[param_name] + + if constraints == "no_validation": + # This parameter is not validated + continue + + # Mixing an interval of reals and an interval of integers must be avoided. + if any( + isinstance(constraint, Interval) and constraint.type == Integral + for constraint in constraints + ) and any( + isinstance(constraint, Interval) and constraint.type == Real + for constraint in constraints + ): + raise ValueError( + f"The constraint for parameter {param_name} of {name} can't have a mix" + " of intervals of Integral and Real types. Use the type RealNotInt" + " instead of Real." + ) + + match = rf"The '{param_name}' parameter of {name} must be .* Got .* instead." + err_msg = ( + f"{name} does not raise an informative error message when the " + f"parameter {param_name} does not have a valid type or value." + ) + + estimator = clone(estimator_orig) + + # First, check that the error is raised if param doesn't match any valid type. + estimator.set_params(**{param_name: param_with_bad_type}) + + for method in fit_methods: + if not hasattr(estimator, method): + # the method is not accessible with the current set of parameters + continue + + err_msg = ( + f"{name} does not raise an informative error message when the parameter" + f" {param_name} does not have a valid type. If any Python type is" + " valid, the constraint should be 'no_validation'." + ) + + with raises(InvalidParameterError, match=match, err_msg=err_msg): + if any( + isinstance(X_type, str) and X_type.endswith("labels") + for X_type in _safe_tags(estimator, key="X_types") + ): + # The estimator is a label transformer and take only `y` + getattr(estimator, method)(y) + else: + getattr(estimator, method)(X, y) + + # Then, for constraints that are more than a type constraint, check that the + # error is raised if param does match a valid type but does not match any valid + # value for this type. + constraints = [make_constraint(constraint) for constraint in constraints] + + for constraint in constraints: + try: + bad_value = generate_invalid_param_val(constraint) + except NotImplementedError: + continue + + estimator.set_params(**{param_name: bad_value}) + + for method in fit_methods: + if not hasattr(estimator, method): + # the method is not accessible with the current set of parameters + continue + + err_msg = ( + f"{name} does not raise an informative error message when the " + f"parameter {param_name} does not have a valid value.\n" + "Constraints should be disjoint. For instance " + "[StrOptions({'a_string'}), str] is not a acceptable set of " + "constraint because generating an invalid string for the first " + "constraint will always produce a valid string for the second " + "constraint." 
+ ) + + with raises(InvalidParameterError, match=match, err_msg=err_msg): + if any( + X_type.endswith("labels") + for X_type in _safe_tags(estimator, key="X_types") + ): + # The estimator is a label transformer and take only `y` + getattr(estimator, method)(y) + else: + getattr(estimator, method)(X, y) + + +def check_set_output_transform(name, transformer_orig): + # Check transformer.set_output with the default configuration does not + # change the transform output. + tags = transformer_orig._get_tags() + if "2darray" not in tags["X_types"] or tags["no_validation"]: + return + + rng = np.random.RandomState(0) + transformer = clone(transformer_orig) + + X = rng.uniform(size=(20, 5)) + X = _enforce_estimator_tags_X(transformer_orig, X) + y = rng.randint(0, 2, size=20) + y = _enforce_estimator_tags_y(transformer_orig, y) + set_random_state(transformer) + + def fit_then_transform(est): + if name in CROSS_DECOMPOSITION: + return est.fit(X, y).transform(X, y) + return est.fit(X, y).transform(X) + + def fit_transform(est): + return est.fit_transform(X, y) + + transform_methods = { + "transform": fit_then_transform, + "fit_transform": fit_transform, + } + for name, transform_method in transform_methods.items(): + transformer = clone(transformer) + if not hasattr(transformer, name): + continue + X_trans_no_setting = transform_method(transformer) + + # Auto wrapping only wraps the first array + if name in CROSS_DECOMPOSITION: + X_trans_no_setting = X_trans_no_setting[0] + + transformer.set_output(transform="default") + X_trans_default = transform_method(transformer) + + if name in CROSS_DECOMPOSITION: + X_trans_default = X_trans_default[0] + + # Default and no setting -> returns the same transformation + assert_allclose_dense_sparse(X_trans_no_setting, X_trans_default) + + +def _output_from_fit_transform(transformer, name, X, df, y): + """Generate output to test `set_output` for different configuration: + + - calling either `fit.transform` or `fit_transform`; + - passing either a dataframe or a numpy array to fit; + - passing either a dataframe or a numpy array to transform. + """ + outputs = {} + + # fit then transform case: + cases = [ + ("fit.transform/df/df", df, df), + ("fit.transform/df/array", df, X), + ("fit.transform/array/df", X, df), + ("fit.transform/array/array", X, X), + ] + if all(hasattr(transformer, meth) for meth in ["fit", "transform"]): + for ( + case, + data_fit, + data_transform, + ) in cases: + transformer.fit(data_fit, y) + if name in CROSS_DECOMPOSITION: + X_trans, _ = transformer.transform(data_transform, y) + else: + X_trans = transformer.transform(data_transform) + outputs[case] = (X_trans, transformer.get_feature_names_out()) + + # fit_transform case: + cases = [ + ("fit_transform/df", df), + ("fit_transform/array", X), + ] + if hasattr(transformer, "fit_transform"): + for case, data in cases: + if name in CROSS_DECOMPOSITION: + X_trans, _ = transformer.fit_transform(data, y) + else: + X_trans = transformer.fit_transform(data, y) + outputs[case] = (X_trans, transformer.get_feature_names_out()) + + return outputs + + +def _check_generated_dataframe( + name, + case, + index, + outputs_default, + outputs_dataframe_lib, + is_supported_dataframe, + create_dataframe, + assert_frame_equal, +): + """Check if the generated DataFrame by the transformer is valid. + + The DataFrame implementation is specified through the parameters of this function. + + Parameters + ---------- + name : str + The name of the transformer. 
+    case : str
+        A single case from the cases generated by `_output_from_fit_transform`.
+    index : index or None
+        The index of the DataFrame. `None` if the library does not implement a
+        DataFrame with an index.
+    outputs_default : tuple
+        A tuple containing the output data and feature names for the default output.
+    outputs_dataframe_lib : tuple
+        A tuple containing the output data and feature names for the pandas case.
+    is_supported_dataframe : callable
+        A callable that takes a DataFrame instance as input and returns whether or
+        not it is supported by the dataframe library.
+        E.g. `lambda X: isinstance(X, pd.DataFrame)`.
+    create_dataframe : callable
+        A callable taking as parameters `data`, `columns`, and `index` and returning
+        a dataframe. Be aware that `index` can be ignored. For example, polars
+        dataframes would ignore the index.
+    assert_frame_equal : callable
+        A callable taking two dataframes and checking that they are equal.
+    """
+    X_trans, feature_names_default = outputs_default
+    df_trans, feature_names_dataframe_lib = outputs_dataframe_lib
+
+    assert is_supported_dataframe(df_trans)
+    # We always rely on the output of `get_feature_names_out` of the
+    # transformer used to generate the dataframe as a ground-truth of the
+    # columns.
+    # If a dataframe is passed into transform, then the output should have the
+    # same index.
+    expected_index = index if case.endswith("df") else None
+    expected_dataframe = create_dataframe(
+        X_trans, columns=feature_names_dataframe_lib, index=expected_index
+    )
+
+    try:
+        assert_frame_equal(df_trans, expected_dataframe)
+    except AssertionError as e:
+        raise AssertionError(
+            f"{name} does not generate a valid dataframe in the {case} "
+            "case. The generated dataframe is not equal to the expected "
+            f"dataframe. The error message is: {e}"
+        ) from e
+
+
+def _check_set_output_transform_dataframe(
+    name,
+    transformer_orig,
+    *,
+    dataframe_lib,
+    is_supported_dataframe,
+    create_dataframe,
+    assert_frame_equal,
+    context,
+):
+    """Check that a transformer can output a DataFrame when requested.
+
+    The DataFrame implementation is specified through the parameters of this function.
+
+    Parameters
+    ----------
+    name : str
+        The name of the transformer.
+    transformer_orig : estimator
+        The original transformer instance.
+    dataframe_lib : str
+        The name of the library implementing the DataFrame.
+    is_supported_dataframe : callable
+        A callable that takes a DataFrame instance as input and returns whether or
+        not it is supported by the dataframe library.
+        E.g. `lambda X: isinstance(X, pd.DataFrame)`.
+    create_dataframe : callable
+        A callable taking as parameters `data`, `columns`, and `index` and returning
+        a dataframe. Be aware that `index` can be ignored. For example, polars
+        dataframes will ignore the index.
+    assert_frame_equal : callable
+        A callable taking two dataframes and checking that they are equal.
+    context : {"local", "global"}
+        Whether to use a local context by calling `set_output(...)` on the
+        transformer or a global context via `config_context(...)`.
+    """
+    # Check transformer.set_output configures the output of transform="pandas".
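+    # Illustrative note (not in upstream scikit-learn): the set_output API being
+    # verified here can be used directly on any supporting transformer, e.g.
+    #
+    #     scaler = StandardScaler().set_output(transform="pandas")
+    #     X_out = scaler.fit_transform(df)   # X_out is a pandas DataFrame
+    #
+    # assuming df is a pandas DataFrame; StandardScaler is only one example of a
+    # transformer implementing set_output.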
+ tags = transformer_orig._get_tags() + if "2darray" not in tags["X_types"] or tags["no_validation"]: + return + + rng = np.random.RandomState(0) + transformer = clone(transformer_orig) + + X = rng.uniform(size=(20, 5)) + X = _enforce_estimator_tags_X(transformer_orig, X) + y = rng.randint(0, 2, size=20) + y = _enforce_estimator_tags_y(transformer_orig, y) + set_random_state(transformer) + + feature_names_in = [f"col{i}" for i in range(X.shape[1])] + index = [f"index{i}" for i in range(X.shape[0])] + df = create_dataframe(X, columns=feature_names_in, index=index) + + transformer_default = clone(transformer).set_output(transform="default") + outputs_default = _output_from_fit_transform(transformer_default, name, X, df, y) + + if context == "local": + transformer_df = clone(transformer).set_output(transform=dataframe_lib) + context_to_use = nullcontext() + else: # global + transformer_df = clone(transformer) + context_to_use = config_context(transform_output=dataframe_lib) + + try: + with context_to_use: + outputs_df = _output_from_fit_transform(transformer_df, name, X, df, y) + except ValueError as e: + # transformer does not support sparse data + capitalized_lib = dataframe_lib.capitalize() + error_message = str(e) + assert ( + f"{capitalized_lib} output does not support sparse data." in error_message + or "The transformer outputs a scipy sparse matrix." in error_message + ), e + return + + for case in outputs_default: + _check_generated_dataframe( + name, + case, + index, + outputs_default[case], + outputs_df[case], + is_supported_dataframe, + create_dataframe, + assert_frame_equal, + ) + + +def _check_set_output_transform_pandas_context(name, transformer_orig, context): + try: + import pandas as pd + except ImportError: # pragma: no cover + raise SkipTest("pandas is not installed: not checking set output") + + _check_set_output_transform_dataframe( + name, + transformer_orig, + dataframe_lib="pandas", + is_supported_dataframe=lambda X: isinstance(X, pd.DataFrame), + create_dataframe=lambda X, columns, index: pd.DataFrame( + X, columns=columns, copy=False, index=index + ), + assert_frame_equal=pd.testing.assert_frame_equal, + context=context, + ) + + +def check_set_output_transform_pandas(name, transformer_orig): + _check_set_output_transform_pandas_context(name, transformer_orig, "local") + + +def check_global_output_transform_pandas(name, transformer_orig): + _check_set_output_transform_pandas_context(name, transformer_orig, "global") + + +def _check_set_output_transform_polars_context(name, transformer_orig, context): + try: + import polars as pl + from polars.testing import assert_frame_equal + except ImportError: # pragma: no cover + raise SkipTest("polars is not installed: not checking set output") + + def create_dataframe(X, columns, index): + if isinstance(columns, np.ndarray): + columns = columns.tolist() + + return pl.DataFrame(X, schema=columns, orient="row") + + _check_set_output_transform_dataframe( + name, + transformer_orig, + dataframe_lib="polars", + is_supported_dataframe=lambda X: isinstance(X, pl.DataFrame), + create_dataframe=create_dataframe, + assert_frame_equal=assert_frame_equal, + context=context, + ) + + +def check_set_output_transform_polars(name, transformer_orig): + _check_set_output_transform_polars_context(name, transformer_orig, "local") + + +def check_global_set_output_transform_polars(name, transformer_orig): + _check_set_output_transform_polars_context(name, transformer_orig, "global") diff --git 
a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/extmath.py b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/extmath.py new file mode 100644 index 0000000000000000000000000000000000000000..9336ad851665926010bd7c99fc6f7b35fae2a574 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/extmath.py @@ -0,0 +1,1284 @@ +""" +The :mod:`sklearn.utils.extmath` module includes utilities to perform +optimal mathematical operations in scikit-learn that are not available in SciPy. +""" +# Authors: Gael Varoquaux +# Alexandre Gramfort +# Alexandre T. Passos +# Olivier Grisel +# Lars Buitinck +# Stefan van der Walt +# Kyle Kastner +# Giorgio Patrini +# License: BSD 3 clause + +import warnings +from functools import partial +from numbers import Integral + +import numpy as np +from scipy import linalg, sparse + +from ..utils import deprecated +from ..utils._param_validation import Interval, StrOptions, validate_params +from . import check_random_state +from ._array_api import _is_numpy_namespace, device, get_namespace +from .sparsefuncs_fast import csr_row_norms +from .validation import check_array + + +def squared_norm(x): + """Squared Euclidean or Frobenius norm of x. + + Faster than norm(x) ** 2. + + Parameters + ---------- + x : array-like + The input array which could be either be a vector or a 2 dimensional array. + + Returns + ------- + float + The Euclidean norm when x is a vector, the Frobenius norm when x + is a matrix (2-d array). + """ + x = np.ravel(x, order="K") + if np.issubdtype(x.dtype, np.integer): + warnings.warn( + ( + "Array type is integer, np.dot may overflow. " + "Data should be float type to avoid this issue" + ), + UserWarning, + ) + return np.dot(x, x) + + +def row_norms(X, squared=False): + """Row-wise (squared) Euclidean norm of X. + + Equivalent to np.sqrt((X * X).sum(axis=1)), but also supports sparse + matrices and does not create an X.shape-sized temporary. + + Performs no input validation. + + Parameters + ---------- + X : array-like + The input array. + squared : bool, default=False + If True, return squared norms. + + Returns + ------- + array-like + The row-wise (squared) Euclidean norm of X. + """ + if sparse.issparse(X): + X = X.tocsr() + norms = csr_row_norms(X) + if not squared: + norms = np.sqrt(norms) + else: + xp, _ = get_namespace(X) + if _is_numpy_namespace(xp): + X = np.asarray(X) + norms = np.einsum("ij,ij->i", X, X) + norms = xp.asarray(norms) + else: + norms = xp.sum(xp.multiply(X, X), axis=1) + if not squared: + norms = xp.sqrt(norms) + return norms + + +def fast_logdet(A): + """Compute logarithm of determinant of a square matrix. + + The (natural) logarithm of the determinant of a square matrix + is returned if det(A) is non-negative and well defined. + If the determinant is zero or negative returns -Inf. + + Equivalent to : np.log(np.det(A)) but more robust. + + Parameters + ---------- + A : array_like of shape (n, n) + The square matrix. + + Returns + ------- + logdet : float + When det(A) is strictly positive, log(det(A)) is returned. + When det(A) is non-positive or not defined, then -inf is returned. + + See Also + -------- + numpy.linalg.slogdet : Compute the sign and (natural) logarithm of the determinant + of an array. 
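# --- Editorial sketch (not part of the scikit-learn diff above): row_norms,
# defined earlier in this file, avoids the X.shape-sized temporary that
# np.sqrt((X * X).sum(axis=1)) would allocate and also accepts sparse input.
import numpy as np
from scipy import sparse
from sklearn.utils.extmath import row_norms

X = np.array([[3.0, 4.0], [0.0, 5.0]])
print(row_norms(X))                     # [5. 5.]
print(row_norms(X, squared=True))       # [25. 25.]
print(row_norms(sparse.csr_matrix(X)))  # [5. 5.], computed via csr_row_norms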
+ + Examples + -------- + >>> import numpy as np + >>> from sklearn.utils.extmath import fast_logdet + >>> a = np.array([[5, 1], [2, 8]]) + >>> fast_logdet(a) + 3.6375861597263857 + """ + xp, _ = get_namespace(A) + sign, ld = xp.linalg.slogdet(A) + if not sign > 0: + return -xp.inf + return ld + + +def density(w): + """Compute density of a sparse vector. + + Parameters + ---------- + w : {ndarray, sparse matrix} + The input data can be numpy ndarray or a sparse matrix. + + Returns + ------- + float + The density of w, between 0 and 1. + + Examples + -------- + >>> from scipy import sparse + >>> from sklearn.utils.extmath import density + >>> X = sparse.random(10, 10, density=0.25, random_state=0) + >>> density(X) + 0.25 + """ + if hasattr(w, "toarray"): + d = float(w.nnz) / (w.shape[0] * w.shape[1]) + else: + d = 0 if w is None else float((w != 0).sum()) / w.size + return d + + +def safe_sparse_dot(a, b, *, dense_output=False): + """Dot product that handle the sparse matrix case correctly. + + Parameters + ---------- + a : {ndarray, sparse matrix} + b : {ndarray, sparse matrix} + dense_output : bool, default=False + When False, ``a`` and ``b`` both being sparse will yield sparse output. + When True, output will always be a dense array. + + Returns + ------- + dot_product : {ndarray, sparse matrix} + Sparse if ``a`` and ``b`` are sparse and ``dense_output=False``. + + Examples + -------- + >>> from scipy.sparse import csr_matrix + >>> from sklearn.utils.extmath import safe_sparse_dot + >>> X = csr_matrix([[1, 2], [3, 4], [5, 6]]) + >>> dot_product = safe_sparse_dot(X, X.T) + >>> dot_product.toarray() + array([[ 5, 11, 17], + [11, 25, 39], + [17, 39, 61]]) + """ + if a.ndim > 2 or b.ndim > 2: + if sparse.issparse(a): + # sparse is always 2D. Implies b is 3D+ + # [i, j] @ [k, ..., l, m, n] -> [i, k, ..., l, n] + b_ = np.rollaxis(b, -2) + b_2d = b_.reshape((b.shape[-2], -1)) + ret = a @ b_2d + ret = ret.reshape(a.shape[0], *b_.shape[1:]) + elif sparse.issparse(b): + # sparse is always 2D. Implies a is 3D+ + # [k, ..., l, m] @ [i, j] -> [k, ..., l, j] + a_2d = a.reshape(-1, a.shape[-1]) + ret = a_2d @ b + ret = ret.reshape(*a.shape[:-1], b.shape[1]) + else: + ret = np.dot(a, b) + else: + ret = a @ b + + if ( + sparse.issparse(a) + and sparse.issparse(b) + and dense_output + and hasattr(ret, "toarray") + ): + return ret.toarray() + return ret + + +def randomized_range_finder( + A, *, size, n_iter, power_iteration_normalizer="auto", random_state=None +): + """Compute an orthonormal matrix whose range approximates the range of A. + + Parameters + ---------- + A : 2D array + The input data matrix. + + size : int + Size of the return array. + + n_iter : int + Number of power iterations used to stabilize the result. + + power_iteration_normalizer : {'auto', 'QR', 'LU', 'none'}, default='auto' + Whether the power iterations are normalized with step-by-step + QR factorization (the slowest but most accurate), 'none' + (the fastest but numerically unstable when `n_iter` is large, e.g. + typically 5 or larger), or 'LU' factorization (numerically stable + but can lose slightly in accuracy). The 'auto' mode applies no + normalization if `n_iter` <= 2 and switches to LU otherwise. + + .. versionadded:: 0.18 + + random_state : int, RandomState instance or None, default=None + The seed of the pseudo random number generator to use when shuffling + the data, i.e. getting the random vectors to initialize the algorithm. + Pass an int for reproducible results across multiple function calls. + See :term:`Glossary `. 
+ + Returns + ------- + Q : ndarray + A (size x size) projection matrix, the range of which + approximates well the range of the input matrix A. + + Notes + ----- + + Follows Algorithm 4.3 of + :arxiv:`"Finding structure with randomness: + Stochastic algorithms for constructing approximate matrix decompositions" + <0909.4061>` + Halko, et al. (2009) + + An implementation of a randomized algorithm for principal component + analysis + A. Szlam et al. 2014 + + Examples + -------- + >>> import numpy as np + >>> from sklearn.utils.extmath import randomized_range_finder + >>> A = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) + >>> randomized_range_finder(A, size=2, n_iter=2, random_state=42) + array([[-0.21..., 0.88...], + [-0.52..., 0.24...], + [-0.82..., -0.38...]]) + """ + xp, is_array_api_compliant = get_namespace(A) + random_state = check_random_state(random_state) + + # Generating normal random vectors with shape: (A.shape[1], size) + # XXX: generate random number directly from xp if it's possible + # one day. + Q = xp.asarray(random_state.normal(size=(A.shape[1], size))) + if hasattr(A, "dtype") and xp.isdtype(A.dtype, kind="real floating"): + # Use float32 computation and components if A has a float32 dtype. + Q = xp.astype(Q, A.dtype, copy=False) + + # Move Q to device if needed only after converting to float32 if needed to + # avoid allocating unnecessary memory on the device. + + # Note: we cannot combine the astype and to_device operations in one go + # using xp.asarray(..., dtype=dtype, device=device) because downcasting + # from float64 to float32 in asarray might not always be accepted as only + # casts following type promotion rules are guarateed to work. + # https://github.com/data-apis/array-api/issues/647 + if is_array_api_compliant: + Q = xp.asarray(Q, device=device(A)) + + # Deal with "auto" mode + if power_iteration_normalizer == "auto": + if n_iter <= 2: + power_iteration_normalizer = "none" + elif is_array_api_compliant: + # XXX: https://github.com/data-apis/array-api/issues/627 + warnings.warn( + "Array API does not support LU factorization, falling back to QR" + " instead. Set `power_iteration_normalizer='QR'` explicitly to silence" + " this warning." + ) + power_iteration_normalizer = "QR" + else: + power_iteration_normalizer = "LU" + elif power_iteration_normalizer == "LU" and is_array_api_compliant: + raise ValueError( + "Array API does not support LU factorization. Set " + "`power_iteration_normalizer='QR'` instead." + ) + + if is_array_api_compliant: + qr_normalizer = partial(xp.linalg.qr, mode="reduced") + else: + # Use scipy.linalg instead of numpy.linalg when not explicitly + # using the Array API. 
+ qr_normalizer = partial(linalg.qr, mode="economic") + + if power_iteration_normalizer == "QR": + normalizer = qr_normalizer + elif power_iteration_normalizer == "LU": + normalizer = partial(linalg.lu, permute_l=True) + else: + normalizer = lambda x: (x, None) + + # Perform power iterations with Q to further 'imprint' the top + # singular vectors of A in Q + for _ in range(n_iter): + Q, _ = normalizer(A @ Q) + Q, _ = normalizer(A.T @ Q) + + # Sample the range of A using by linear projection of Q + # Extract an orthonormal basis + Q, _ = qr_normalizer(A @ Q) + + return Q + + +@validate_params( + { + "M": [np.ndarray, "sparse matrix"], + "n_components": [Interval(Integral, 1, None, closed="left")], + "n_oversamples": [Interval(Integral, 0, None, closed="left")], + "n_iter": [Interval(Integral, 0, None, closed="left"), StrOptions({"auto"})], + "power_iteration_normalizer": [StrOptions({"auto", "QR", "LU", "none"})], + "transpose": ["boolean", StrOptions({"auto"})], + "flip_sign": ["boolean"], + "random_state": ["random_state"], + "svd_lapack_driver": [StrOptions({"gesdd", "gesvd"})], + }, + prefer_skip_nested_validation=True, +) +def randomized_svd( + M, + n_components, + *, + n_oversamples=10, + n_iter="auto", + power_iteration_normalizer="auto", + transpose="auto", + flip_sign=True, + random_state=None, + svd_lapack_driver="gesdd", +): + """Compute a truncated randomized SVD. + + This method solves the fixed-rank approximation problem described in [1]_ + (problem (1.5), p5). + + Parameters + ---------- + M : {ndarray, sparse matrix} + Matrix to decompose. + + n_components : int + Number of singular values and vectors to extract. + + n_oversamples : int, default=10 + Additional number of random vectors to sample the range of `M` so as + to ensure proper conditioning. The total number of random vectors + used to find the range of `M` is `n_components + n_oversamples`. Smaller + number can improve speed but can negatively impact the quality of + approximation of singular vectors and singular values. Users might wish + to increase this parameter up to `2*k - n_components` where k is the + effective rank, for large matrices, noisy problems, matrices with + slowly decaying spectrums, or to increase precision accuracy. See [1]_ + (pages 5, 23 and 26). + + n_iter : int or 'auto', default='auto' + Number of power iterations. It can be used to deal with very noisy + problems. When 'auto', it is set to 4, unless `n_components` is small + (< .1 * min(X.shape)) in which case `n_iter` is set to 7. + This improves precision with few components. Note that in general + users should rather increase `n_oversamples` before increasing `n_iter` + as the principle of the randomized method is to avoid usage of these + more costly power iterations steps. When `n_components` is equal + or greater to the effective matrix rank and the spectrum does not + present a slow decay, `n_iter=0` or `1` should even work fine in theory + (see [1]_ page 9). + + .. versionchanged:: 0.18 + + power_iteration_normalizer : {'auto', 'QR', 'LU', 'none'}, default='auto' + Whether the power iterations are normalized with step-by-step + QR factorization (the slowest but most accurate), 'none' + (the fastest but numerically unstable when `n_iter` is large, e.g. + typically 5 or larger), or 'LU' factorization (numerically stable + but can lose slightly in accuracy). The 'auto' mode applies no + normalization if `n_iter` <= 2 and switches to LU otherwise. + + .. 
versionadded:: 0.18 + + transpose : bool or 'auto', default='auto' + Whether the algorithm should be applied to M.T instead of M. The + result should approximately be the same. The 'auto' mode will + trigger the transposition if M.shape[1] > M.shape[0] since this + implementation of randomized SVD tend to be a little faster in that + case. + + .. versionchanged:: 0.18 + + flip_sign : bool, default=True + The output of a singular value decomposition is only unique up to a + permutation of the signs of the singular vectors. If `flip_sign` is + set to `True`, the sign ambiguity is resolved by making the largest + loadings for each component in the left singular vectors positive. + + random_state : int, RandomState instance or None, default='warn' + The seed of the pseudo random number generator to use when + shuffling the data, i.e. getting the random vectors to initialize + the algorithm. Pass an int for reproducible results across multiple + function calls. See :term:`Glossary `. + + .. versionchanged:: 1.2 + The default value changed from 0 to None. + + svd_lapack_driver : {"gesdd", "gesvd"}, default="gesdd" + Whether to use the more efficient divide-and-conquer approach + (`"gesdd"`) or more general rectangular approach (`"gesvd"`) to compute + the SVD of the matrix B, which is the projection of M into a low + dimensional subspace, as described in [1]_. + + .. versionadded:: 1.2 + + Returns + ------- + u : ndarray of shape (n_samples, n_components) + Unitary matrix having left singular vectors with signs flipped as columns. + s : ndarray of shape (n_components,) + The singular values, sorted in non-increasing order. + vh : ndarray of shape (n_components, n_features) + Unitary matrix having right singular vectors with signs flipped as rows. + + Notes + ----- + This algorithm finds a (usually very good) approximate truncated + singular value decomposition using randomization to speed up the + computations. It is particularly fast on large matrices on which + you wish to extract only a small number of components. In order to + obtain further speed up, `n_iter` can be set <=2 (at the cost of + loss of precision). To increase the precision it is recommended to + increase `n_oversamples`, up to `2*k-n_components` where k is the + effective rank. Usually, `n_components` is chosen to be greater than k + so increasing `n_oversamples` up to `n_components` should be enough. + + References + ---------- + .. [1] :arxiv:`"Finding structure with randomness: + Stochastic algorithms for constructing approximate matrix decompositions" + <0909.4061>` + Halko, et al. (2009) + + .. [2] A randomized algorithm for the decomposition of matrices + Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert + + .. [3] An implementation of a randomized algorithm for principal component + analysis A. Szlam et al. 2014 + + Examples + -------- + >>> import numpy as np + >>> from sklearn.utils.extmath import randomized_svd + >>> a = np.array([[1, 2, 3, 5], + ... [3, 4, 5, 6], + ... [7, 8, 9, 10]]) + >>> U, s, Vh = randomized_svd(a, n_components=2, random_state=0) + >>> U.shape, s.shape, Vh.shape + ((3, 2), (2,), (2, 4)) + """ + if sparse.issparse(M) and M.format in ("lil", "dok"): + warnings.warn( + "Calculating SVD of a {} is expensive. 
" + "csr_matrix is more efficient.".format(type(M).__name__), + sparse.SparseEfficiencyWarning, + ) + + random_state = check_random_state(random_state) + n_random = n_components + n_oversamples + n_samples, n_features = M.shape + + if n_iter == "auto": + # Checks if the number of iterations is explicitly specified + # Adjust n_iter. 7 was found a good compromise for PCA. See #5299 + n_iter = 7 if n_components < 0.1 * min(M.shape) else 4 + + if transpose == "auto": + transpose = n_samples < n_features + if transpose: + # this implementation is a bit faster with smaller shape[1] + M = M.T + + Q = randomized_range_finder( + M, + size=n_random, + n_iter=n_iter, + power_iteration_normalizer=power_iteration_normalizer, + random_state=random_state, + ) + + # project M to the (k + p) dimensional space using the basis vectors + B = Q.T @ M + + # compute the SVD on the thin matrix: (k + p) wide + xp, is_array_api_compliant = get_namespace(B) + if is_array_api_compliant: + Uhat, s, Vt = xp.linalg.svd(B, full_matrices=False) + else: + # When when array_api_dispatch is disabled, rely on scipy.linalg + # instead of numpy.linalg to avoid introducing a behavior change w.r.t. + # previous versions of scikit-learn. + Uhat, s, Vt = linalg.svd( + B, full_matrices=False, lapack_driver=svd_lapack_driver + ) + del B + U = Q @ Uhat + + if flip_sign: + if not transpose: + U, Vt = svd_flip(U, Vt) + else: + # In case of transpose u_based_decision=false + # to actually flip based on u and not v. + U, Vt = svd_flip(U, Vt, u_based_decision=False) + + if transpose: + # transpose back the results according to the input convention + return Vt[:n_components, :].T, s[:n_components], U[:, :n_components].T + else: + return U[:, :n_components], s[:n_components], Vt[:n_components, :] + + +def _randomized_eigsh( + M, + n_components, + *, + n_oversamples=10, + n_iter="auto", + power_iteration_normalizer="auto", + selection="module", + random_state=None, +): + """Computes a truncated eigendecomposition using randomized methods + + This method solves the fixed-rank approximation problem described in the + Halko et al paper. + + The choice of which components to select can be tuned with the `selection` + parameter. + + .. versionadded:: 0.24 + + Parameters + ---------- + M : ndarray or sparse matrix + Matrix to decompose, it should be real symmetric square or complex + hermitian + + n_components : int + Number of eigenvalues and vectors to extract. + + n_oversamples : int, default=10 + Additional number of random vectors to sample the range of M so as + to ensure proper conditioning. The total number of random vectors + used to find the range of M is n_components + n_oversamples. Smaller + number can improve speed but can negatively impact the quality of + approximation of eigenvectors and eigenvalues. Users might wish + to increase this parameter up to `2*k - n_components` where k is the + effective rank, for large matrices, noisy problems, matrices with + slowly decaying spectrums, or to increase precision accuracy. See Halko + et al (pages 5, 23 and 26). + + n_iter : int or 'auto', default='auto' + Number of power iterations. It can be used to deal with very noisy + problems. When 'auto', it is set to 4, unless `n_components` is small + (< .1 * min(X.shape)) in which case `n_iter` is set to 7. + This improves precision with few components. 
Note that in general + users should rather increase `n_oversamples` before increasing `n_iter` + as the principle of the randomized method is to avoid usage of these + more costly power iterations steps. When `n_components` is equal + or greater to the effective matrix rank and the spectrum does not + present a slow decay, `n_iter=0` or `1` should even work fine in theory + (see Halko et al paper, page 9). + + power_iteration_normalizer : {'auto', 'QR', 'LU', 'none'}, default='auto' + Whether the power iterations are normalized with step-by-step + QR factorization (the slowest but most accurate), 'none' + (the fastest but numerically unstable when `n_iter` is large, e.g. + typically 5 or larger), or 'LU' factorization (numerically stable + but can lose slightly in accuracy). The 'auto' mode applies no + normalization if `n_iter` <= 2 and switches to LU otherwise. + + selection : {'value', 'module'}, default='module' + Strategy used to select the n components. When `selection` is `'value'` + (not yet implemented, will become the default when implemented), the + components corresponding to the n largest eigenvalues are returned. + When `selection` is `'module'`, the components corresponding to the n + eigenvalues with largest modules are returned. + + random_state : int, RandomState instance, default=None + The seed of the pseudo random number generator to use when shuffling + the data, i.e. getting the random vectors to initialize the algorithm. + Pass an int for reproducible results across multiple function calls. + See :term:`Glossary `. + + Notes + ----- + This algorithm finds a (usually very good) approximate truncated + eigendecomposition using randomized methods to speed up the computations. + + This method is particularly fast on large matrices on which + you wish to extract only a small number of components. In order to + obtain further speed up, `n_iter` can be set <=2 (at the cost of + loss of precision). To increase the precision it is recommended to + increase `n_oversamples`, up to `2*k-n_components` where k is the + effective rank. Usually, `n_components` is chosen to be greater than k + so increasing `n_oversamples` up to `n_components` should be enough. + + Strategy 'value': not implemented yet. + Algorithms 5.3, 5.4 and 5.5 in the Halko et al paper should provide good + candidates for a future implementation. + + Strategy 'module': + The principle is that for diagonalizable matrices, the singular values and + eigenvalues are related: if t is an eigenvalue of A, then :math:`|t|` is a + singular value of A. This method relies on a randomized SVD to find the n + singular components corresponding to the n singular values with largest + modules, and then uses the signs of the singular vectors to find the true + sign of t: if the sign of left and right singular vectors are different + then the corresponding eigenvalue is negative. + + Returns + ------- + eigvals : 1D array of shape (n_components,) containing the `n_components` + eigenvalues selected (see ``selection`` parameter). + eigvecs : 2D array of shape (M.shape[0], n_components) containing the + `n_components` eigenvectors corresponding to the `eigvals`, in the + corresponding order. Note that this follows the `scipy.linalg.eigh` + convention. + + See Also + -------- + :func:`randomized_svd` + + References + ---------- + * :arxiv:`"Finding structure with randomness: + Stochastic algorithms for constructing approximate matrix decompositions" + (Algorithm 4.3 for strategy 'module') <0909.4061>` + Halko, et al. 
(2009) + """ + if selection == "value": # pragma: no cover + # to do : an algorithm can be found in the Halko et al reference + raise NotImplementedError() + + elif selection == "module": + # Note: no need for deterministic U and Vt (flip_sign=True), + # as we only use the dot product UVt afterwards + U, S, Vt = randomized_svd( + M, + n_components=n_components, + n_oversamples=n_oversamples, + n_iter=n_iter, + power_iteration_normalizer=power_iteration_normalizer, + flip_sign=False, + random_state=random_state, + ) + + eigvecs = U[:, :n_components] + eigvals = S[:n_components] + + # Conversion of Singular values into Eigenvalues: + # For any eigenvalue t, the corresponding singular value is |t|. + # So if there is a negative eigenvalue t, the corresponding singular + # value will be -t, and the left (U) and right (V) singular vectors + # will have opposite signs. + # Fastest way: see + diag_VtU = np.einsum("ji,ij->j", Vt[:n_components, :], U[:, :n_components]) + signs = np.sign(diag_VtU) + eigvals = eigvals * signs + + else: # pragma: no cover + raise ValueError("Invalid `selection`: %r" % selection) + + return eigvals, eigvecs + + +def weighted_mode(a, w, *, axis=0): + """Return an array of the weighted modal (most common) value in the passed array. + + If there is more than one such value, only the first is returned. + The bin-count for the modal bins is also returned. + + This is an extension of the algorithm in scipy.stats.mode. + + Parameters + ---------- + a : array-like of shape (n_samples,) + Array of which values to find mode(s). + w : array-like of shape (n_samples,) + Array of weights for each value. + axis : int, default=0 + Axis along which to operate. Default is 0, i.e. the first axis. + + Returns + ------- + vals : ndarray + Array of modal values. + score : ndarray + Array of weighted counts for each mode. + + See Also + -------- + scipy.stats.mode: Calculates the Modal (most common) value of array elements + along specified axis. + + Examples + -------- + >>> from sklearn.utils.extmath import weighted_mode + >>> x = [4, 1, 4, 2, 4, 2] + >>> weights = [1, 1, 1, 1, 1, 1] + >>> weighted_mode(x, weights) + (array([4.]), array([3.])) + + The value 4 appears three times: with uniform weights, the result is + simply the mode of the distribution. + + >>> weights = [1, 3, 0.5, 1.5, 1, 2] # deweight the 4's + >>> weighted_mode(x, weights) + (array([2.]), array([3.5])) + + The value 2 has the highest score: it appears twice with weights of + 1.5 and 2: the sum of these is 3.5. + """ + if axis is None: + a = np.ravel(a) + w = np.ravel(w) + axis = 0 + else: + a = np.asarray(a) + w = np.asarray(w) + + if a.shape != w.shape: + w = np.full(a.shape, w, dtype=w.dtype) + + scores = np.unique(np.ravel(a)) # get ALL unique values + testshape = list(a.shape) + testshape[axis] = 1 + oldmostfreq = np.zeros(testshape) + oldcounts = np.zeros(testshape) + for score in scores: + template = np.zeros(a.shape) + ind = a == score + template[ind] = w[ind] + counts = np.expand_dims(np.sum(template, axis), axis) + mostfrequent = np.where(counts > oldcounts, score, oldmostfreq) + oldcounts = np.maximum(counts, oldcounts) + oldmostfreq = mostfrequent + return mostfrequent, oldcounts + + +def cartesian(arrays, out=None): + """Generate a cartesian product of input arrays. + + Parameters + ---------- + arrays : list of array-like + 1-D arrays to form the cartesian product of. + out : ndarray of shape (M, len(arrays)), default=None + Array to place the cartesian product in. 
+ + Returns + ------- + out : ndarray of shape (M, len(arrays)) + Array containing the cartesian products formed of input arrays. + If not provided, the `dtype` of the output array is set to the most + permissive `dtype` of the input arrays, according to NumPy type + promotion. + + .. versionadded:: 1.2 + Add support for arrays of different types. + + Notes + ----- + This function may not be used on more than 32 arrays + because the underlying numpy functions do not support it. + + Examples + -------- + >>> from sklearn.utils.extmath import cartesian + >>> cartesian(([1, 2, 3], [4, 5], [6, 7])) + array([[1, 4, 6], + [1, 4, 7], + [1, 5, 6], + [1, 5, 7], + [2, 4, 6], + [2, 4, 7], + [2, 5, 6], + [2, 5, 7], + [3, 4, 6], + [3, 4, 7], + [3, 5, 6], + [3, 5, 7]]) + """ + arrays = [np.asarray(x) for x in arrays] + shape = (len(x) for x in arrays) + + ix = np.indices(shape) + ix = ix.reshape(len(arrays), -1).T + + if out is None: + dtype = np.result_type(*arrays) # find the most permissive dtype + out = np.empty_like(ix, dtype=dtype) + + for n, arr in enumerate(arrays): + out[:, n] = arrays[n][ix[:, n]] + + return out + + +def svd_flip(u, v, u_based_decision=True): + """Sign correction to ensure deterministic output from SVD. + + Adjusts the columns of u and the rows of v such that the loadings in the + columns in u that are largest in absolute value are always positive. + + If u_based_decision is False, then the same sign correction is applied to + so that the rows in v that are largest in absolute value are always + positive. + + Parameters + ---------- + u : ndarray + Parameters u and v are the output of `linalg.svd` or + :func:`~sklearn.utils.extmath.randomized_svd`, with matching inner + dimensions so one can compute `np.dot(u * s, v)`. + + v : ndarray + Parameters u and v are the output of `linalg.svd` or + :func:`~sklearn.utils.extmath.randomized_svd`, with matching inner + dimensions so one can compute `np.dot(u * s, v)`. The input v should + really be called vt to be consistent with scipy's output. + + u_based_decision : bool, default=True + If True, use the columns of u as the basis for sign flipping. + Otherwise, use the rows of v. The choice of which variable to base the + decision on is generally algorithm dependent. + + Returns + ------- + u_adjusted : ndarray + Array u with adjusted columns and the same dimensions as u. + + v_adjusted : ndarray + Array v with adjusted rows and the same dimensions as v. + """ + xp, _ = get_namespace(u, v) + device = getattr(u, "device", None) + + if u_based_decision: + # columns of u, rows of v, or equivalently rows of u.T and v + max_abs_u_cols = xp.argmax(xp.abs(u.T), axis=1) + shift = xp.arange(u.T.shape[0], device=device) + indices = max_abs_u_cols + shift * u.T.shape[1] + signs = xp.sign(xp.take(xp.reshape(u.T, (-1,)), indices, axis=0)) + u *= signs[np.newaxis, :] + v *= signs[:, np.newaxis] + else: + # rows of v, columns of u + max_abs_v_rows = xp.argmax(xp.abs(v), axis=1) + shift = xp.arange(v.shape[0], device=device) + indices = max_abs_v_rows + shift * v.shape[1] + signs = xp.sign(xp.take(xp.reshape(v, (-1,)), indices)) + u *= signs[np.newaxis, :] + v *= signs[:, np.newaxis] + return u, v + + +# TODO(1.6): remove +@deprecated( # type: ignore + "The function `log_logistic` is deprecated and will be removed in 1.6. " + "Use `-np.logaddexp(0, -x)` instead." +) +def log_logistic(X, out=None): + """Compute the log of the logistic function, ``log(1 / (1 + e ** -x))``. + + This implementation is numerically stable and uses `-np.logaddexp(0, -x)`. 
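# --- Editorial sketch (not part of the scikit-learn diff above): svd_flip,
# defined earlier in this file, only resolves the sign ambiguity of an SVD; the
# reconstruction U @ diag(s) @ Vt is left unchanged.
import numpy as np
from scipy import linalg
from sklearn.utils.extmath import svd_flip

rng = np.random.RandomState(0)
X = rng.randn(4, 3)
U, s, Vt = linalg.svd(X, full_matrices=False)
U_f, Vt_f = svd_flip(U.copy(), Vt.copy())   # copies: svd_flip works in place
# the largest-magnitude entry of every column of U_f is now positive ...
assert np.all(U_f[np.abs(U_f).argmax(axis=0), np.arange(U_f.shape[1])] > 0)
# ... and the factorization still reproduces X
assert np.allclose((U_f * s) @ Vt_f, X)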
+ + For the ordinary logistic function, use ``scipy.special.expit``. + + Parameters + ---------- + X : array-like of shape (M, N) or (M,) + Argument to the logistic function. + + out : array-like of shape (M, N) or (M,), default=None + Preallocated output array. + + Returns + ------- + out : ndarray of shape (M, N) or (M,) + Log of the logistic function evaluated at every point in x. + + Notes + ----- + See the blog post describing this implementation: + http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression/ + """ + X = check_array(X, dtype=np.float64, ensure_2d=False) + + if out is None: + out = np.empty_like(X) + + np.logaddexp(0, -X, out=out) + out *= -1 + return out + + +def softmax(X, copy=True): + """ + Calculate the softmax function. + + The softmax function is calculated by + np.exp(X) / np.sum(np.exp(X), axis=1) + + This will cause overflow when large values are exponentiated. + Hence the largest value in each row is subtracted from each data + point to prevent this. + + Parameters + ---------- + X : array-like of float of shape (M, N) + Argument to the logistic function. + + copy : bool, default=True + Copy X or not. + + Returns + ------- + out : ndarray of shape (M, N) + Softmax function evaluated at every point in x. + """ + xp, is_array_api_compliant = get_namespace(X) + if copy: + X = xp.asarray(X, copy=True) + max_prob = xp.reshape(xp.max(X, axis=1), (-1, 1)) + X -= max_prob + + if _is_numpy_namespace(xp): + # optimization for NumPy arrays + np.exp(X, out=np.asarray(X)) + else: + # array_api does not have `out=` + X = xp.exp(X) + + sum_prob = xp.reshape(xp.sum(X, axis=1), (-1, 1)) + X /= sum_prob + return X + + +def make_nonnegative(X, min_value=0): + """Ensure `X.min()` >= `min_value`. + + Parameters + ---------- + X : array-like + The matrix to make non-negative. + min_value : float, default=0 + The threshold value. + + Returns + ------- + array-like + The thresholded array. + + Raises + ------ + ValueError + When X is sparse. + """ + min_ = X.min() + if min_ < min_value: + if sparse.issparse(X): + raise ValueError( + "Cannot make the data matrix" + " nonnegative because it is sparse." + " Adding a value to every entry would" + " make it no longer sparse." + ) + X = X + (min_value - min_) + return X + + +# Use at least float64 for the accumulating functions to avoid precision issue +# see https://github.com/numpy/numpy/issues/9393. The float64 is also retained +# as it is in case the float overflows +def _safe_accumulator_op(op, x, *args, **kwargs): + """ + This function provides numpy accumulator functions with a float64 dtype + when used on a floating point input. This prevents accumulator overflow on + smaller floating point dtypes. + + Parameters + ---------- + op : function + A numpy accumulator function such as np.mean or np.sum. + x : ndarray + A numpy array to apply the accumulator function. + *args : positional arguments + Positional arguments passed to the accumulator function after the + input x. + **kwargs : keyword arguments + Keyword arguments passed to the accumulator function. + + Returns + ------- + result + The output of the accumulator function passed to this function. + """ + if np.issubdtype(x.dtype, np.floating) and x.dtype.itemsize < 8: + result = op(x, *args, **kwargs, dtype=np.float64) + else: + result = op(x, *args, **kwargs) + return result + + +def _incremental_mean_and_var( + X, last_mean, last_variance, last_sample_count, sample_weight=None +): + """Calculate mean update and a Youngs and Cramer variance update. 
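# --- Editorial sketch (not part of the scikit-learn diff above): the
# max-subtraction trick used by softmax above keeps the exponentials finite
# even when the raw scores are far outside the range of exp().
import numpy as np
from sklearn.utils.extmath import softmax

scores = np.array([[1000.0, 1001.0, 1002.0]])
print(softmax(scores))   # approx. [[0.090 0.245 0.665]]; naive exp(1000) overflows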
+ + If sample_weight is given, the weighted mean and variance is computed. + + Update a given mean and (possibly) variance according to new data given + in X. last_mean is always required to compute the new mean. + If last_variance is None, no variance is computed and None return for + updated_variance. + + From the paper "Algorithms for computing the sample variance: analysis and + recommendations", by Chan, Golub, and LeVeque. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Data to use for variance update. + + last_mean : array-like of shape (n_features,) + + last_variance : array-like of shape (n_features,) + + last_sample_count : array-like of shape (n_features,) + The number of samples encountered until now if sample_weight is None. + If sample_weight is not None, this is the sum of sample_weight + encountered. + + sample_weight : array-like of shape (n_samples,) or None + Sample weights. If None, compute the unweighted mean/variance. + + Returns + ------- + updated_mean : ndarray of shape (n_features,) + + updated_variance : ndarray of shape (n_features,) + None if last_variance was None. + + updated_sample_count : ndarray of shape (n_features,) + + Notes + ----- + NaNs are ignored during the algorithm. + + References + ---------- + T. Chan, G. Golub, R. LeVeque. Algorithms for computing the sample + variance: recommendations, The American Statistician, Vol. 37, No. 3, + pp. 242-247 + + Also, see the sparse implementation of this in + `utils.sparsefuncs.incr_mean_variance_axis` and + `utils.sparsefuncs_fast.incr_mean_variance_axis0` + """ + # old = stats until now + # new = the current increment + # updated = the aggregated stats + last_sum = last_mean * last_sample_count + X_nan_mask = np.isnan(X) + if np.any(X_nan_mask): + sum_op = np.nansum + else: + sum_op = np.sum + if sample_weight is not None: + # equivalent to np.nansum(X * sample_weight, axis=0) + # safer because np.float64(X*W) != np.float64(X)*np.float64(W) + new_sum = _safe_accumulator_op( + np.matmul, sample_weight, np.where(X_nan_mask, 0, X) + ) + new_sample_count = _safe_accumulator_op( + np.sum, sample_weight[:, None] * (~X_nan_mask), axis=0 + ) + else: + new_sum = _safe_accumulator_op(sum_op, X, axis=0) + n_samples = X.shape[0] + new_sample_count = n_samples - np.sum(X_nan_mask, axis=0) + + updated_sample_count = last_sample_count + new_sample_count + + updated_mean = (last_sum + new_sum) / updated_sample_count + + if last_variance is None: + updated_variance = None + else: + T = new_sum / new_sample_count + temp = X - T + if sample_weight is not None: + # equivalent to np.nansum((X-T)**2 * sample_weight, axis=0) + # safer because np.float64(X*W) != np.float64(X)*np.float64(W) + correction = _safe_accumulator_op( + np.matmul, sample_weight, np.where(X_nan_mask, 0, temp) + ) + temp **= 2 + new_unnormalized_variance = _safe_accumulator_op( + np.matmul, sample_weight, np.where(X_nan_mask, 0, temp) + ) + else: + correction = _safe_accumulator_op(sum_op, temp, axis=0) + temp **= 2 + new_unnormalized_variance = _safe_accumulator_op(sum_op, temp, axis=0) + + # correction term of the corrected 2 pass algorithm. + # See "Algorithms for computing the sample variance: analysis + # and recommendations", by Chan, Golub, and LeVeque. 
+ new_unnormalized_variance -= correction**2 / new_sample_count + + last_unnormalized_variance = last_variance * last_sample_count + + with np.errstate(divide="ignore", invalid="ignore"): + last_over_new_count = last_sample_count / new_sample_count + updated_unnormalized_variance = ( + last_unnormalized_variance + + new_unnormalized_variance + + last_over_new_count + / updated_sample_count + * (last_sum / last_over_new_count - new_sum) ** 2 + ) + + zeros = last_sample_count == 0 + updated_unnormalized_variance[zeros] = new_unnormalized_variance[zeros] + updated_variance = updated_unnormalized_variance / updated_sample_count + + return updated_mean, updated_variance, updated_sample_count + + +def _deterministic_vector_sign_flip(u): + """Modify the sign of vectors for reproducibility. + + Flips the sign of elements of all the vectors (rows of u) such that + the absolute maximum element of each vector is positive. + + Parameters + ---------- + u : ndarray + Array with vectors as its rows. + + Returns + ------- + u_flipped : ndarray with same shape as u + Array with the sign flipped vectors as its rows. + """ + max_abs_rows = np.argmax(np.abs(u), axis=1) + signs = np.sign(u[range(u.shape[0]), max_abs_rows]) + u *= signs[:, np.newaxis] + return u + + +def stable_cumsum(arr, axis=None, rtol=1e-05, atol=1e-08): + """Use high precision for cumsum and check that final value matches sum. + + Warns if the final cumulative sum does not match the sum (up to the chosen + tolerance). + + Parameters + ---------- + arr : array-like + To be cumulatively summed as flat. + axis : int, default=None + Axis along which the cumulative sum is computed. + The default (None) is to compute the cumsum over the flattened array. + rtol : float, default=1e-05 + Relative tolerance, see ``np.allclose``. + atol : float, default=1e-08 + Absolute tolerance, see ``np.allclose``. + + Returns + ------- + out : ndarray + Array with the cumulative sums along the chosen axis. + """ + out = np.cumsum(arr, axis=axis, dtype=np.float64) + expected = np.sum(arr, axis=axis, dtype=np.float64) + if not np.allclose( + out.take(-1, axis=axis), expected, rtol=rtol, atol=atol, equal_nan=True + ): + warnings.warn( + ( + "cumsum was found to be unstable: " + "its last element does not correspond to sum" + ), + RuntimeWarning, + ) + return out + + +def _nanaverage(a, weights=None): + """Compute the weighted average, ignoring NaNs. + + Parameters + ---------- + a : ndarray + Array containing data to be averaged. + weights : array-like, default=None + An array of weights associated with the values in a. Each value in a + contributes to the average according to its associated weight. The + weights array can either be 1-D of the same shape as a. If `weights=None`, + then all data in a are assumed to have a weight equal to one. + + Returns + ------- + weighted_average : float + The weighted average. + + Notes + ----- + This wrapper to combine :func:`numpy.average` and :func:`numpy.nanmean`, so + that :func:`np.nan` values are ignored from the average and weights can + be passed. Note that when possible, we delegate to the prime methods. 
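# --- Editorial sketch (not part of the scikit-learn diff above): the
# incremental update computed by _incremental_mean_and_var above should agree
# with the batch statistics of the concatenated data (population variance,
# i.e. ddof=0, as returned by np.var).
import numpy as np
from sklearn.utils.extmath import _incremental_mean_and_var

rng = np.random.RandomState(0)
A, B = rng.randn(50, 3), rng.randn(30, 3)
mean, var, count = _incremental_mean_and_var(
    B,
    last_mean=A.mean(axis=0),
    last_variance=A.var(axis=0),
    last_sample_count=np.full(3, A.shape[0], dtype=np.float64),
)
X_all = np.vstack([A, B])
assert np.allclose(mean, X_all.mean(axis=0))
assert np.allclose(var, X_all.var(axis=0))
assert np.allclose(count, X_all.shape[0])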
+ """ + + if len(a) == 0: + return np.nan + + mask = np.isnan(a) + if mask.all(): + return np.nan + + if weights is None: + return np.nanmean(a) + + weights = np.asarray(weights) + a, weights = a[~mask], weights[~mask] + try: + return np.average(a, weights=weights) + except ZeroDivisionError: + # this is when all weights are zero, then ignore them + return np.average(a) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/fixes.py b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/fixes.py new file mode 100644 index 0000000000000000000000000000000000000000..e97062390920d4ab0306dc946ef9d434f84e1628 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/fixes.py @@ -0,0 +1,394 @@ +"""Compatibility fixes for older version of python, numpy and scipy + +If you add content to this file, please give the version of the package +at which the fix is no longer needed. +""" +# Authors: Emmanuelle Gouillart +# Gael Varoquaux +# Fabian Pedregosa +# Lars Buitinck +# +# License: BSD 3 clause + + +import numpy as np +import scipy +import scipy.sparse.linalg +import scipy.stats +import threadpoolctl + +import sklearn + +from ..externals._packaging.version import parse as parse_version +from .deprecation import deprecated + +np_version = parse_version(np.__version__) +np_base_version = parse_version(np_version.base_version) +sp_version = parse_version(scipy.__version__) +sp_base_version = parse_version(sp_version.base_version) + +# TODO: We can consider removing the containers and importing +# directly from SciPy when sparse matrices will be deprecated. +CSR_CONTAINERS = [scipy.sparse.csr_matrix] +CSC_CONTAINERS = [scipy.sparse.csc_matrix] +COO_CONTAINERS = [scipy.sparse.coo_matrix] +LIL_CONTAINERS = [scipy.sparse.lil_matrix] +DOK_CONTAINERS = [scipy.sparse.dok_matrix] +BSR_CONTAINERS = [scipy.sparse.bsr_matrix] +DIA_CONTAINERS = [scipy.sparse.dia_matrix] + +if parse_version(scipy.__version__) >= parse_version("1.8"): + # Sparse Arrays have been added in SciPy 1.8 + # TODO: When SciPy 1.8 is the minimum supported version, + # those list can be created directly without this condition. + # See: https://github.com/scikit-learn/scikit-learn/issues/27090 + CSR_CONTAINERS.append(scipy.sparse.csr_array) + CSC_CONTAINERS.append(scipy.sparse.csc_array) + COO_CONTAINERS.append(scipy.sparse.coo_array) + LIL_CONTAINERS.append(scipy.sparse.lil_array) + DOK_CONTAINERS.append(scipy.sparse.dok_array) + BSR_CONTAINERS.append(scipy.sparse.bsr_array) + DIA_CONTAINERS.append(scipy.sparse.dia_array) + +try: + from scipy.optimize._linesearch import line_search_wolfe1, line_search_wolfe2 +except ImportError: # SciPy < 1.8 + from scipy.optimize.linesearch import line_search_wolfe2, line_search_wolfe1 # type: ignore # noqa + + +def _object_dtype_isnan(X): + return X != X + + +# Rename the `method` kwarg to `interpolation` for NumPy < 1.22, because +# `interpolation` kwarg was deprecated in favor of `method` in NumPy >= 1.22. +def _percentile(a, q, *, method="linear", **kwargs): + return np.percentile(a, q, interpolation=method, **kwargs) + + +if np_version < parse_version("1.22"): + percentile = _percentile +else: # >= 1.22 + from numpy import percentile # type: ignore # noqa + + +# compatibility fix for threadpoolctl >= 3.0.0 +# since version 3 it's possible to setup a global threadpool controller to avoid +# looping through all loaded shared libraries each time. +# the global controller is created during the first call to threadpoolctl. 
+def _get_threadpool_controller(): + if not hasattr(threadpoolctl, "ThreadpoolController"): + return None + + if not hasattr(sklearn, "_sklearn_threadpool_controller"): + sklearn._sklearn_threadpool_controller = threadpoolctl.ThreadpoolController() + + return sklearn._sklearn_threadpool_controller + + +def threadpool_limits(limits=None, user_api=None): + controller = _get_threadpool_controller() + if controller is not None: + return controller.limit(limits=limits, user_api=user_api) + else: + return threadpoolctl.threadpool_limits(limits=limits, user_api=user_api) + + +threadpool_limits.__doc__ = threadpoolctl.threadpool_limits.__doc__ + + +def threadpool_info(): + controller = _get_threadpool_controller() + if controller is not None: + return controller.info() + else: + return threadpoolctl.threadpool_info() + + +threadpool_info.__doc__ = threadpoolctl.threadpool_info.__doc__ + + +@deprecated( + "The function `delayed` has been moved from `sklearn.utils.fixes` to " + "`sklearn.utils.parallel`. This import path will be removed in 1.5." +) +def delayed(function): + from sklearn.utils.parallel import delayed + + return delayed(function) + + +# TODO: Remove when SciPy 1.11 is the minimum supported version +def _mode(a, axis=0): + if sp_version >= parse_version("1.9.0"): + mode = scipy.stats.mode(a, axis=axis, keepdims=True) + if sp_version >= parse_version("1.10.999"): + # scipy.stats.mode has changed returned array shape with axis=None + # and keepdims=True, see https://github.com/scipy/scipy/pull/17561 + if axis is None: + mode = np.ravel(mode) + return mode + return scipy.stats.mode(a, axis=axis) + + +# TODO: Remove when Scipy 1.12 is the minimum supported version +if sp_base_version >= parse_version("1.12.0"): + _sparse_linalg_cg = scipy.sparse.linalg.cg +else: + + def _sparse_linalg_cg(A, b, **kwargs): + if "rtol" in kwargs: + kwargs["tol"] = kwargs.pop("rtol") + if "atol" not in kwargs: + kwargs["atol"] = "legacy" + return scipy.sparse.linalg.cg(A, b, **kwargs) + + +# TODO: Fuse the modern implementations of _sparse_min_max and _sparse_nan_min_max +# into the public min_max_axis function when Scipy 1.11 is the minimum supported +# version and delete the backport in the else branch below. +if sp_base_version >= parse_version("1.11.0"): + + def _sparse_min_max(X, axis): + the_min = X.min(axis=axis) + the_max = X.max(axis=axis) + + if axis is not None: + the_min = the_min.toarray().ravel() + the_max = the_max.toarray().ravel() + + return the_min, the_max + + def _sparse_nan_min_max(X, axis): + the_min = X.nanmin(axis=axis) + the_max = X.nanmax(axis=axis) + + if axis is not None: + the_min = the_min.toarray().ravel() + the_max = the_max.toarray().ravel() + + return the_min, the_max + +else: + # This code is mostly taken from scipy 0.14 and extended to handle nans, see + # https://github.com/scikit-learn/scikit-learn/pull/11196 + def _minor_reduce(X, ufunc): + major_index = np.flatnonzero(np.diff(X.indptr)) + + # reduceat tries casts X.indptr to intp, which errors + # if it is int64 on a 32 bit system. 
+ # Reinitializing prevents this where possible, see #13737 + X = type(X)((X.data, X.indices, X.indptr), shape=X.shape) + value = ufunc.reduceat(X.data, X.indptr[major_index]) + return major_index, value + + def _min_or_max_axis(X, axis, min_or_max): + N = X.shape[axis] + if N == 0: + raise ValueError("zero-size array to reduction operation") + M = X.shape[1 - axis] + mat = X.tocsc() if axis == 0 else X.tocsr() + mat.sum_duplicates() + major_index, value = _minor_reduce(mat, min_or_max) + not_full = np.diff(mat.indptr)[major_index] < N + value[not_full] = min_or_max(value[not_full], 0) + mask = value != 0 + major_index = np.compress(mask, major_index) + value = np.compress(mask, value) + + if axis == 0: + res = scipy.sparse.coo_matrix( + (value, (np.zeros(len(value)), major_index)), + dtype=X.dtype, + shape=(1, M), + ) + else: + res = scipy.sparse.coo_matrix( + (value, (major_index, np.zeros(len(value)))), + dtype=X.dtype, + shape=(M, 1), + ) + return res.A.ravel() + + def _sparse_min_or_max(X, axis, min_or_max): + if axis is None: + if 0 in X.shape: + raise ValueError("zero-size array to reduction operation") + zero = X.dtype.type(0) + if X.nnz == 0: + return zero + m = min_or_max.reduce(X.data.ravel()) + if X.nnz != np.prod(X.shape): + m = min_or_max(zero, m) + return m + if axis < 0: + axis += 2 + if (axis == 0) or (axis == 1): + return _min_or_max_axis(X, axis, min_or_max) + else: + raise ValueError("invalid axis, use 0 for rows, or 1 for columns") + + def _sparse_min_max(X, axis): + return ( + _sparse_min_or_max(X, axis, np.minimum), + _sparse_min_or_max(X, axis, np.maximum), + ) + + def _sparse_nan_min_max(X, axis): + return ( + _sparse_min_or_max(X, axis, np.fmin), + _sparse_min_or_max(X, axis, np.fmax), + ) + + +# For +1.25 NumPy versions exceptions and warnings are being moved +# to a dedicated submodule. +if np_version >= parse_version("1.25.0"): + from numpy.exceptions import ComplexWarning, VisibleDeprecationWarning +else: + from numpy import ComplexWarning, VisibleDeprecationWarning # type: ignore # noqa + + +# TODO: Remove when Scipy 1.6 is the minimum supported version +try: + from scipy.integrate import trapezoid # type: ignore # noqa +except ImportError: + from scipy.integrate import trapz as trapezoid # type: ignore # noqa + + +# TODO: Adapt when Pandas > 2.2 is the minimum supported version +def pd_fillna(pd, frame): + pd_version = parse_version(pd.__version__).base_version + if parse_version(pd_version) < parse_version("2.2"): + frame = frame.fillna(value=np.nan) + else: + infer_objects_kwargs = ( + {} if parse_version(pd_version) >= parse_version("3") else {"copy": False} + ) + with pd.option_context("future.no_silent_downcasting", True): + frame = frame.fillna(value=np.nan).infer_objects(**infer_objects_kwargs) + return frame + + +# TODO: remove when SciPy 1.12 is the minimum supported version +def _preserve_dia_indices_dtype( + sparse_container, original_container_format, requested_sparse_format +): + """Preserve indices dtype for SciPy < 1.12 when converting from DIA to CSR/CSC. + + For SciPy < 1.12, DIA arrays indices are upcasted to `np.int64` that is + inconsistent with DIA matrices. We downcast the indices dtype to `np.int32` to + be consistent with DIA matrices. + + The converted indices arrays are affected back inplace to the sparse container. + + Parameters + ---------- + sparse_container : sparse container + Sparse container to be checked. + requested_sparse_format : str or bool + The type of format of `sparse_container`. 
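# --- Editorial sketch (not part of the scikit-learn diff above): the trapezoid
# import shim defined earlier in this file exposes a single name regardless of
# whether SciPy provides `trapezoid` or only the older `trapz`.
import numpy as np
from sklearn.utils.fixes import trapezoid

x = np.linspace(0.0, 1.0, 101)
print(trapezoid(x**2, x))   # approx. 1/3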
+ + Notes + ----- + See https://github.com/scipy/scipy/issues/19245 for more details. + """ + if original_container_format == "dia_array" and requested_sparse_format in ( + "csr", + "coo", + ): + if requested_sparse_format == "csr": + index_dtype = _smallest_admissible_index_dtype( + arrays=(sparse_container.indptr, sparse_container.indices), + maxval=max(sparse_container.nnz, sparse_container.shape[1]), + check_contents=True, + ) + sparse_container.indices = sparse_container.indices.astype( + index_dtype, copy=False + ) + sparse_container.indptr = sparse_container.indptr.astype( + index_dtype, copy=False + ) + else: # requested_sparse_format == "coo" + index_dtype = _smallest_admissible_index_dtype( + maxval=max(sparse_container.shape) + ) + sparse_container.row = sparse_container.row.astype(index_dtype, copy=False) + sparse_container.col = sparse_container.col.astype(index_dtype, copy=False) + + +# TODO: remove when SciPy 1.12 is the minimum supported version +def _smallest_admissible_index_dtype(arrays=(), maxval=None, check_contents=False): + """Based on input (integer) arrays `a`, determine a suitable index data + type that can hold the data in the arrays. + + This function returns `np.int64` if it either required by `maxval` or based on the + largest precision of the dtype of the arrays passed as argument, or by the their + contents (when `check_contents is True`). If none of the condition requires + `np.int64` then this function returns `np.int32`. + + Parameters + ---------- + arrays : ndarray or tuple of ndarrays, default=() + Input arrays whose types/contents to check. + + maxval : float, default=None + Maximum value needed. + + check_contents : bool, default=False + Whether to check the values in the arrays and not just their types. + By default, check only the types. + + Returns + ------- + dtype : {np.int32, np.int64} + Suitable index data type (int32 or int64). + """ + + int32min = np.int32(np.iinfo(np.int32).min) + int32max = np.int32(np.iinfo(np.int32).max) + + if maxval is not None: + if maxval > np.iinfo(np.int64).max: + raise ValueError( + f"maxval={maxval} is to large to be represented as np.int64." + ) + if maxval > int32max: + return np.int64 + + if isinstance(arrays, np.ndarray): + arrays = (arrays,) + + for arr in arrays: + if not isinstance(arr, np.ndarray): + raise TypeError( + f"Arrays should be of type np.ndarray, got {type(arr)} instead." + ) + if not np.issubdtype(arr.dtype, np.integer): + raise ValueError( + f"Array dtype {arr.dtype} is not supported for index dtype. We expect " + "integral values." + ) + if not np.can_cast(arr.dtype, np.int32): + if not check_contents: + # when `check_contents` is False, we stay on the safe side and return + # np.int64. 
+ return np.int64 + if arr.size == 0: + # a bigger type not needed yet, let's look at the next array + continue + else: + maxval = arr.max() + minval = arr.min() + if minval < int32min or maxval > int32max: + # a big index type is actually needed + return np.int64 + + return np.int32 + + +# TODO: Remove when Scipy 1.12 is the minimum supported version +if sp_version < parse_version("1.12"): + from ..externals._scipy.sparse.csgraph import laplacian # type: ignore # noqa +else: + from scipy.sparse.csgraph import laplacian # type: ignore # noqa # pragma: no cover diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/graph.py b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/graph.py new file mode 100644 index 0000000000000000000000000000000000000000..06b2e152101a9c2fe843a0704ea17080ba73a21b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/graph.py @@ -0,0 +1,166 @@ +""" +The :mod:`sklearn.utils.graph` module includes graph utilities and algorithms. +""" + +# Authors: Aric Hagberg +# Gael Varoquaux +# Jake Vanderplas +# License: BSD 3 clause + +import numpy as np +from scipy import sparse + +from ..metrics.pairwise import pairwise_distances +from ._param_validation import Integral, Interval, validate_params + + +############################################################################### +# Path and connected component analysis. +# Code adapted from networkx +@validate_params( + { + "graph": ["array-like", "sparse matrix"], + "source": [Interval(Integral, 0, None, closed="left")], + "cutoff": [Interval(Integral, 0, None, closed="left"), None], + }, + prefer_skip_nested_validation=True, +) +def single_source_shortest_path_length(graph, source, *, cutoff=None): + """Return the length of the shortest path from source to all reachable nodes. + + Parameters + ---------- + graph : {array-like, sparse matrix} of shape (n_nodes, n_nodes) + Adjacency matrix of the graph. Sparse matrix of format LIL is + preferred. + + source : int + Start node for path. + + cutoff : int, default=None + Depth to stop the search - only paths of length <= cutoff are returned. + + Returns + ------- + paths : dict + Reachable end nodes mapped to length of path from source, + i.e. `{end: path_length}`. + + Examples + -------- + >>> from sklearn.utils.graph import single_source_shortest_path_length + >>> import numpy as np + >>> graph = np.array([[ 0, 1, 0, 0], + ... [ 1, 0, 1, 0], + ... [ 0, 1, 0, 0], + ... [ 0, 0, 0, 0]]) + >>> single_source_shortest_path_length(graph, 0) + {0: 0, 1: 1, 2: 2} + >>> graph = np.ones((6, 6)) + >>> sorted(single_source_shortest_path_length(graph, 2).items()) + [(0, 1), (1, 1), (2, 0), (3, 1), (4, 1), (5, 1)] + """ + if sparse.issparse(graph): + graph = graph.tolil() + else: + graph = sparse.lil_matrix(graph) + seen = {} # level (number of hops) when seen in BFS + level = 0 # the current level + next_level = [source] # dict of nodes to check at next level + while next_level: + this_level = next_level # advance to next level + next_level = set() # and start a new list (fringe) + for v in this_level: + if v not in seen: + seen[v] = level # set the level of vertex v + next_level.update(graph.rows[v]) + if cutoff is not None and cutoff <= level: + break + level += 1 + return seen # return all path lengths as dictionary + + +def _fix_connected_components( + X, + graph, + n_connected_components, + component_labels, + mode="distance", + metric="euclidean", + **kwargs, +): + """Add connections to sparse graph to connect unconnected components. 
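# --- Editorial sketch (not part of the scikit-learn diff above): the helper
# _smallest_admissible_index_dtype defined above prefers int32 whenever the
# dtypes, the requested maxval and (optionally) the array contents allow it.
import numpy as np
from sklearn.utils.fixes import _smallest_admissible_index_dtype

idx = np.array([0, 10, 20], dtype=np.int64)
print(_smallest_admissible_index_dtype(idx, check_contents=True))           # np.int32
print(_smallest_admissible_index_dtype(maxval=np.iinfo(np.int32).max + 1))  # np.int64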
+ + For each pair of unconnected components, compute all pairwise distances + from one component to the other, and add a connection on the closest pair + of samples. This is a hacky way to get a graph with a single connected + component, which is necessary for example to compute a shortest path + between all pairs of samples in the graph. + + Parameters + ---------- + X : array of shape (n_samples, n_features) or (n_samples, n_samples) + Features to compute the pairwise distances. If `metric = + "precomputed"`, X is the matrix of pairwise distances. + + graph : sparse matrix of shape (n_samples, n_samples) + Graph of connection between samples. + + n_connected_components : int + Number of connected components, as computed by + `scipy.sparse.csgraph.connected_components`. + + component_labels : array of shape (n_samples) + Labels of connected components, as computed by + `scipy.sparse.csgraph.connected_components`. + + mode : {'connectivity', 'distance'}, default='distance' + Type of graph matrix: 'connectivity' corresponds to the connectivity + matrix with ones and zeros, and 'distance' corresponds to the distances + between neighbors according to the given metric. + + metric : str + Metric used in `sklearn.metrics.pairwise.pairwise_distances`. + + kwargs : kwargs + Keyword arguments passed to + `sklearn.metrics.pairwise.pairwise_distances`. + + Returns + ------- + graph : sparse matrix of shape (n_samples, n_samples) + Graph of connection between samples, with a single connected component. + """ + if metric == "precomputed" and sparse.issparse(X): + raise RuntimeError( + "_fix_connected_components with metric='precomputed' requires the " + "full distance matrix in X, and does not work with a sparse " + "neighbors graph." + ) + + for i in range(n_connected_components): + idx_i = np.flatnonzero(component_labels == i) + Xi = X[idx_i] + for j in range(i): + idx_j = np.flatnonzero(component_labels == j) + Xj = X[idx_j] + + if metric == "precomputed": + D = X[np.ix_(idx_i, idx_j)] + else: + D = pairwise_distances(Xi, Xj, metric=metric, **kwargs) + + ii, jj = np.unravel_index(D.argmin(axis=None), D.shape) + if mode == "connectivity": + graph[idx_i[ii], idx_j[jj]] = 1 + graph[idx_j[jj], idx_i[ii]] = 1 + elif mode == "distance": + graph[idx_i[ii], idx_j[jj]] = D[ii, jj] + graph[idx_j[jj], idx_i[ii]] = D[ii, jj] + else: + raise ValueError( + "Unknown mode=%r, should be one of ['connectivity', 'distance']." + % mode + ) + + return graph diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/metaestimators.py b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/metaestimators.py new file mode 100644 index 0000000000000000000000000000000000000000..639e000dd77a7a8908d64235a5b2ff78111888b0 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/metaestimators.py @@ -0,0 +1,165 @@ +""" +The :mod:`sklearn.utils.metaestimators` module includes utilities for meta-estimators. 
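The repair strategy of `_fix_connected_components` can be seen on a tiny example: a nearest-neighbor style graph over two well separated clusters has two components, and adding one edge between the closest cross-component pair makes it connected. The sketch below mirrors the loop above for a single pair of components using SciPy directly; the data points and edge weights are made up for illustration.

import numpy as np
from scipy import sparse
from scipy.sparse.csgraph import connected_components
from scipy.spatial.distance import cdist  # stand-in for pairwise_distances

# Two well separated clusters whose neighbor graph is disconnected.
X = np.array([[0.0, 0.0], [0.1, 0.0], [10.0, 10.0], [10.1, 10.0]])
graph = sparse.lil_matrix((4, 4))
graph[0, 1] = graph[1, 0] = 0.1
graph[2, 3] = graph[3, 2] = 0.1

n_components, labels = connected_components(graph, directed=False)
print(n_components)  # 2

# Connect the two closest samples across the components, weighted by their
# actual distance (mode="distance" in the helper above).
idx_0 = np.flatnonzero(labels == 0)
idx_1 = np.flatnonzero(labels == 1)
D = cdist(X[idx_0], X[idx_1])
ii, jj = np.unravel_index(D.argmin(), D.shape)
graph[idx_0[ii], idx_1[jj]] = D[ii, jj]
graph[idx_1[jj], idx_0[ii]] = D[ii, jj]

n_components, _ = connected_components(graph, directed=False)
print(n_components)  # 1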
+""" + +# Author: Joel Nothman +# Andreas Mueller +# License: BSD +from abc import ABCMeta, abstractmethod +from contextlib import suppress +from typing import Any, List + +import numpy as np + +from ..base import BaseEstimator +from ..utils import _safe_indexing +from ..utils._tags import _safe_tags +from ._available_if import available_if + +__all__ = ["available_if"] + + +class _BaseComposition(BaseEstimator, metaclass=ABCMeta): + """Handles parameter management for classifiers composed of named estimators.""" + + steps: List[Any] + + @abstractmethod + def __init__(self): + pass + + def _get_params(self, attr, deep=True): + out = super().get_params(deep=deep) + if not deep: + return out + + estimators = getattr(self, attr) + try: + out.update(estimators) + except (TypeError, ValueError): + # Ignore TypeError for cases where estimators is not a list of + # (name, estimator) and ignore ValueError when the list is not + # formatted correctly. This is to prevent errors when calling + # `set_params`. `BaseEstimator.set_params` calls `get_params` which + # can error for invalid values for `estimators`. + return out + + for name, estimator in estimators: + if hasattr(estimator, "get_params"): + for key, value in estimator.get_params(deep=True).items(): + out["%s__%s" % (name, key)] = value + return out + + def _set_params(self, attr, **params): + # Ensure strict ordering of parameter setting: + # 1. All steps + if attr in params: + setattr(self, attr, params.pop(attr)) + # 2. Replace items with estimators in params + items = getattr(self, attr) + if isinstance(items, list) and items: + # Get item names used to identify valid names in params + # `zip` raises a TypeError when `items` does not contains + # elements of length 2 + with suppress(TypeError): + item_names, _ = zip(*items) + for name in list(params.keys()): + if "__" not in name and name in item_names: + self._replace_estimator(attr, name, params.pop(name)) + + # 3. Step parameters and other initialisation arguments + super().set_params(**params) + return self + + def _replace_estimator(self, attr, name, new_val): + # assumes `name` is a valid estimator name + new_estimators = list(getattr(self, attr)) + for i, (estimator_name, _) in enumerate(new_estimators): + if estimator_name == name: + new_estimators[i] = (name, new_val) + break + setattr(self, attr, new_estimators) + + def _validate_names(self, names): + if len(set(names)) != len(names): + raise ValueError("Names provided are not unique: {0!r}".format(list(names))) + invalid_names = set(names).intersection(self.get_params(deep=False)) + if invalid_names: + raise ValueError( + "Estimator names conflict with constructor arguments: {0!r}".format( + sorted(invalid_names) + ) + ) + invalid_names = [name for name in names if "__" in name] + if invalid_names: + raise ValueError( + "Estimator names must not contain __: got {0!r}".format(invalid_names) + ) + + +def _safe_split(estimator, X, y, indices, train_indices=None): + """Create subset of dataset and properly handle kernels. + + Slice X, y according to indices for cross-validation, but take care of + precomputed kernel-matrices or pairwise affinities / distances. + + If ``estimator._pairwise is True``, X needs to be square and + we slice rows and columns. If ``train_indices`` is not None, + we slice rows using ``indices`` (assumed the test set) and columns + using ``train_indices``, indicating the training set. + + Labels y will always be indexed only along the first axis. 
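`_BaseComposition._get_params` and `_set_params` above are what give composite estimators their flat `"<name>__<param>"` parameter namespace. Assuming the usual public `Pipeline` API, a short usage sketch of that behaviour follows; the step names and parameter values are arbitrary.

from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler

pipe = Pipeline([("scale", StandardScaler()), ("clf", LogisticRegression())])

# get_params(deep=True) exposes nested parameters under "<step>__<param>" keys.
print("clf__C" in pipe.get_params(deep=True))  # True

# A "<step>__<param>" key is routed to the underlying estimator's set_params,
# while a bare step name replaces the whole estimator (_replace_estimator).
pipe.set_params(clf__C=0.5)
pipe.set_params(clf=LogisticRegression(C=2.0))
print(pipe.named_steps["clf"].C)  # 2.0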
+ + Parameters + ---------- + estimator : object + Estimator to determine whether we should slice only rows or rows and + columns. + + X : array-like, sparse matrix or iterable + Data to be indexed. If ``estimator._pairwise is True``, + this needs to be a square array-like or sparse matrix. + + y : array-like, sparse matrix or iterable + Targets to be indexed. + + indices : array of int + Rows to select from X and y. + If ``estimator._pairwise is True`` and ``train_indices is None`` + then ``indices`` will also be used to slice columns. + + train_indices : array of int or None, default=None + If ``estimator._pairwise is True`` and ``train_indices is not None``, + then ``train_indices`` will be use to slice the columns of X. + + Returns + ------- + X_subset : array-like, sparse matrix or list + Indexed data. + + y_subset : array-like, sparse matrix or list + Indexed targets. + + """ + if _safe_tags(estimator, key="pairwise"): + if not hasattr(X, "shape"): + raise ValueError( + "Precomputed kernels or affinity matrices have " + "to be passed as arrays or sparse matrices." + ) + # X is a precomputed square kernel matrix + if X.shape[0] != X.shape[1]: + raise ValueError("X should be a square kernel matrix") + if train_indices is None: + X_subset = X[np.ix_(indices, indices)] + else: + X_subset = X[np.ix_(indices, train_indices)] + else: + X_subset = _safe_indexing(X, indices) + + if y is not None: + y_subset = _safe_indexing(y, indices) + else: + y_subset = None + + return X_subset, y_subset diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/multiclass.py b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/multiclass.py new file mode 100644 index 0000000000000000000000000000000000000000..a4b23427e5b7053c397f32b71b265870405e67e5 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/multiclass.py @@ -0,0 +1,553 @@ +""" +The :mod:`sklearn.utils.multiclass` module includes utilities to handle +multiclass/multioutput target in classifiers. +""" + +# Author: Arnaud Joly, Joel Nothman, Hamzeh Alsalhi +# +# License: BSD 3 clause +import warnings +from collections.abc import Sequence +from itertools import chain + +import numpy as np +from scipy.sparse import issparse + +from ..utils._array_api import get_namespace +from ..utils.fixes import VisibleDeprecationWarning +from .validation import _assert_all_finite, check_array + + +def _unique_multiclass(y): + xp, is_array_api_compliant = get_namespace(y) + if hasattr(y, "__array__") or is_array_api_compliant: + return xp.unique_values(xp.asarray(y)) + else: + return set(y) + + +def _unique_indicator(y): + xp, _ = get_namespace(y) + return xp.arange( + check_array(y, input_name="y", accept_sparse=["csr", "csc", "coo"]).shape[1] + ) + + +_FN_UNIQUE_LABELS = { + "binary": _unique_multiclass, + "multiclass": _unique_multiclass, + "multilabel-indicator": _unique_indicator, +} + + +def unique_labels(*ys): + """Extract an ordered array of unique labels. + + We don't allow: + - mix of multilabel and multiclass (single label) targets + - mix of label indicator matrix and anything else, + because there are no explicit labels) + - mix of label indicator matrices of different sizes + - mix of string and integer labels + + At the moment, we also don't allow "multiclass-multioutput" input type. + + Parameters + ---------- + *ys : array-likes + Label values. + + Returns + ------- + out : ndarray of shape (n_unique_labels,) + An ordered array of unique labels. 
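For a pairwise estimator, `_safe_split` above slices a square kernel matrix on both axes: rows by the indices being extracted and columns by the training indices. A small NumPy sketch of that slicing rule follows, with random data and an arbitrary split; this is, for example, how a precomputed kernel would be partitioned during cross-validation of `SVC(kernel="precomputed")`.

import numpy as np

rng = np.random.RandomState(0)
X = rng.rand(6, 3)
K = X @ X.T  # precomputed square kernel matrix over all 6 samples

train = np.array([0, 1, 2, 3])
test = np.array([4, 5])

# Fit-time slice: rows and columns are both the training samples.
K_train = K[np.ix_(train, train)]  # shape (4, 4)
# Test-time slice: test rows, training columns.
K_test = K[np.ix_(test, train)]    # shape (2, 4)
print(K_train.shape, K_test.shape)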
+ + Examples + -------- + >>> from sklearn.utils.multiclass import unique_labels + >>> unique_labels([3, 5, 5, 5, 7, 7]) + array([3, 5, 7]) + >>> unique_labels([1, 2, 3, 4], [2, 2, 3, 4]) + array([1, 2, 3, 4]) + >>> unique_labels([1, 2, 10], [5, 11]) + array([ 1, 2, 5, 10, 11]) + """ + xp, is_array_api_compliant = get_namespace(*ys) + if not ys: + raise ValueError("No argument has been passed.") + # Check that we don't mix label format + + ys_types = set(type_of_target(x) for x in ys) + if ys_types == {"binary", "multiclass"}: + ys_types = {"multiclass"} + + if len(ys_types) > 1: + raise ValueError("Mix type of y not allowed, got types %s" % ys_types) + + label_type = ys_types.pop() + + # Check consistency for the indicator format + if ( + label_type == "multilabel-indicator" + and len( + set( + check_array(y, accept_sparse=["csr", "csc", "coo"]).shape[1] for y in ys + ) + ) + > 1 + ): + raise ValueError( + "Multi-label binary indicator input with different numbers of labels" + ) + + # Get the unique set of labels + _unique_labels = _FN_UNIQUE_LABELS.get(label_type, None) + if not _unique_labels: + raise ValueError("Unknown label type: %s" % repr(ys)) + + if is_array_api_compliant: + # array_api does not allow for mixed dtypes + unique_ys = xp.concat([_unique_labels(y) for y in ys]) + return xp.unique_values(unique_ys) + + ys_labels = set(chain.from_iterable((i for i in _unique_labels(y)) for y in ys)) + # Check that we don't mix string type with number type + if len(set(isinstance(label, str) for label in ys_labels)) > 1: + raise ValueError("Mix of label input types (string and number)") + + return xp.asarray(sorted(ys_labels)) + + +def _is_integral_float(y): + xp, is_array_api_compliant = get_namespace(y) + return xp.isdtype(y.dtype, "real floating") and bool( + xp.all(xp.astype((xp.astype(y, xp.int64)), y.dtype) == y) + ) + + +def is_multilabel(y): + """Check if ``y`` is in a multilabel format. + + Parameters + ---------- + y : ndarray of shape (n_samples,) + Target values. + + Returns + ------- + out : bool + Return ``True``, if ``y`` is in a multilabel format, else ```False``. 
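The restrictions listed in the `unique_labels` docstring show up as `ValueError` at call time, while mixing binary and multiclass targets is allowed. A quick illustration follows; the labels are chosen arbitrarily and the expected behaviour is noted in comments.

from sklearn.utils.multiclass import unique_labels

print(unique_labels([0, 1, 1], [1, 2]))       # binary + multiclass -> [0 1 2]

try:
    unique_labels([0, 1], ["a", "b"])         # string and integer labels
except ValueError as exc:
    print(exc)                                # Mix of label input types ...

try:
    unique_labels([[0, 1], [1, 1]], [0, 1])   # indicator matrix + plain labels
except ValueError as exc:
    print(exc)                                # Mix type of y not allowed ...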
+ + Examples + -------- + >>> import numpy as np + >>> from sklearn.utils.multiclass import is_multilabel + >>> is_multilabel([0, 1, 0, 1]) + False + >>> is_multilabel([[1], [0, 2], []]) + False + >>> is_multilabel(np.array([[1, 0], [0, 0]])) + True + >>> is_multilabel(np.array([[1], [0], [0]])) + False + >>> is_multilabel(np.array([[1, 0, 0]])) + True + """ + xp, is_array_api_compliant = get_namespace(y) + if hasattr(y, "__array__") or isinstance(y, Sequence) or is_array_api_compliant: + # DeprecationWarning will be replaced by ValueError, see NEP 34 + # https://numpy.org/neps/nep-0034-infer-dtype-is-object.html + check_y_kwargs = dict( + accept_sparse=True, + allow_nd=True, + force_all_finite=False, + ensure_2d=False, + ensure_min_samples=0, + ensure_min_features=0, + ) + with warnings.catch_warnings(): + warnings.simplefilter("error", VisibleDeprecationWarning) + try: + y = check_array(y, dtype=None, **check_y_kwargs) + except (VisibleDeprecationWarning, ValueError) as e: + if str(e).startswith("Complex data not supported"): + raise + + # dtype=object should be provided explicitly for ragged arrays, + # see NEP 34 + y = check_array(y, dtype=object, **check_y_kwargs) + + if not (hasattr(y, "shape") and y.ndim == 2 and y.shape[1] > 1): + return False + + if issparse(y): + if y.format in ("dok", "lil"): + y = y.tocsr() + labels = xp.unique_values(y.data) + return ( + len(y.data) == 0 + or (labels.size == 1 or (labels.size == 2) and (0 in labels)) + and (y.dtype.kind in "biu" or _is_integral_float(labels)) # bool, int, uint + ) + else: + labels = xp.unique_values(y) + + return labels.shape[0] < 3 and ( + xp.isdtype(y.dtype, ("bool", "signed integer", "unsigned integer")) + or _is_integral_float(labels) + ) + + +def check_classification_targets(y): + """Ensure that target y is of a non-regression type. + + Only the following target types (as defined in type_of_target) are allowed: + 'binary', 'multiclass', 'multiclass-multioutput', + 'multilabel-indicator', 'multilabel-sequences' + + Parameters + ---------- + y : array-like + Target values. + """ + y_type = type_of_target(y, input_name="y") + if y_type not in [ + "binary", + "multiclass", + "multiclass-multioutput", + "multilabel-indicator", + "multilabel-sequences", + ]: + raise ValueError( + f"Unknown label type: {y_type}. Maybe you are trying to fit a " + "classifier, which expects discrete classes on a " + "regression target with continuous values." + ) + + +def type_of_target(y, input_name=""): + """Determine the type of data indicated by the target. + + Note that this type is the most specific type that can be inferred. + For example: + + * ``binary`` is more specific but compatible with ``multiclass``. + * ``multiclass`` of integers is more specific but compatible with + ``continuous``. + * ``multilabel-indicator`` is more specific but compatible with + ``multiclass-multioutput``. + + Parameters + ---------- + y : {array-like, sparse matrix} + Target values. If a sparse matrix, `y` is expected to be a + CSR/CSC matrix. + + input_name : str, default="" + The data name used to construct the error message. + + .. versionadded:: 1.1.0 + + Returns + ------- + target_type : str + One of: + + * 'continuous': `y` is an array-like of floats that are not all + integers, and is 1d or a column vector. + * 'continuous-multioutput': `y` is a 2d array of floats that are + not all integers, and both dimensions are of size > 1. + * 'binary': `y` contains <= 2 discrete values and is 1d or a column + vector. 
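`check_classification_targets` above is the guard that classifiers apply to `y` before fitting: it accepts the discrete target types and rejects continuous ones. A short illustration with arbitrary values:

import numpy as np
from sklearn.utils.multiclass import check_classification_targets, type_of_target

y_ok = np.array([0, 2, 1, 1])
check_classification_targets(y_ok)  # passes: type_of_target is "multiclass"

y_cont = np.array([0.1, 0.6, 1.3])
print(type_of_target(y_cont))       # "continuous"
try:
    check_classification_targets(y_cont)
except ValueError as exc:
    print(exc)                      # Unknown label type: continuous ...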
+ * 'multiclass': `y` contains more than two discrete values, is not a + sequence of sequences, and is 1d or a column vector. + * 'multiclass-multioutput': `y` is a 2d array that contains more + than two discrete values, is not a sequence of sequences, and both + dimensions are of size > 1. + * 'multilabel-indicator': `y` is a label indicator matrix, an array + of two dimensions with at least two columns, and at most 2 unique + values. + * 'unknown': `y` is array-like but none of the above, such as a 3d + array, sequence of sequences, or an array of non-sequence objects. + + Examples + -------- + >>> from sklearn.utils.multiclass import type_of_target + >>> import numpy as np + >>> type_of_target([0.1, 0.6]) + 'continuous' + >>> type_of_target([1, -1, -1, 1]) + 'binary' + >>> type_of_target(['a', 'b', 'a']) + 'binary' + >>> type_of_target([1.0, 2.0]) + 'binary' + >>> type_of_target([1, 0, 2]) + 'multiclass' + >>> type_of_target([1.0, 0.0, 3.0]) + 'multiclass' + >>> type_of_target(['a', 'b', 'c']) + 'multiclass' + >>> type_of_target(np.array([[1, 2], [3, 1]])) + 'multiclass-multioutput' + >>> type_of_target([[1, 2]]) + 'multilabel-indicator' + >>> type_of_target(np.array([[1.5, 2.0], [3.0, 1.6]])) + 'continuous-multioutput' + >>> type_of_target(np.array([[0, 1], [1, 1]])) + 'multilabel-indicator' + """ + xp, is_array_api_compliant = get_namespace(y) + valid = ( + (isinstance(y, Sequence) or issparse(y) or hasattr(y, "__array__")) + and not isinstance(y, str) + or is_array_api_compliant + ) + + if not valid: + raise ValueError( + "Expected array-like (array or non-string sequence), got %r" % y + ) + + sparse_pandas = y.__class__.__name__ in ["SparseSeries", "SparseArray"] + if sparse_pandas: + raise ValueError("y cannot be class 'SparseSeries' or 'SparseArray'") + + if is_multilabel(y): + return "multilabel-indicator" + + # DeprecationWarning will be replaced by ValueError, see NEP 34 + # https://numpy.org/neps/nep-0034-infer-dtype-is-object.html + # We therefore catch both deprecation (NumPy < 1.24) warning and + # value error (NumPy >= 1.24). + check_y_kwargs = dict( + accept_sparse=True, + allow_nd=True, + force_all_finite=False, + ensure_2d=False, + ensure_min_samples=0, + ensure_min_features=0, + ) + + with warnings.catch_warnings(): + warnings.simplefilter("error", VisibleDeprecationWarning) + if not issparse(y): + try: + y = check_array(y, dtype=None, **check_y_kwargs) + except (VisibleDeprecationWarning, ValueError) as e: + if str(e).startswith("Complex data not supported"): + raise + + # dtype=object should be provided explicitly for ragged arrays, + # see NEP 34 + y = check_array(y, dtype=object, **check_y_kwargs) + + # The old sequence of sequences format + try: + first_row = y[[0], :] if issparse(y) else y[0] + if ( + not hasattr(first_row, "__array__") + and isinstance(first_row, Sequence) + and not isinstance(first_row, str) + ): + raise ValueError( + "You appear to be using a legacy multi-label data" + " representation. Sequence of sequences are no" + " longer supported; use a binary array or sparse" + " matrix instead - the MultiLabelBinarizer" + " transformer can convert to this format." 
+ ) + except IndexError: + pass + + # Invalid inputs + if y.ndim not in (1, 2): + # Number of dimension greater than 2: [[[1, 2]]] + return "unknown" + if not min(y.shape): + # Empty ndarray: []/[[]] + if y.ndim == 1: + # 1-D empty array: [] + return "binary" # [] + # 2-D empty array: [[]] + return "unknown" + if not issparse(y) and y.dtype == object and not isinstance(y.flat[0], str): + # [obj_1] and not ["label_1"] + return "unknown" + + # Check if multioutput + if y.ndim == 2 and y.shape[1] > 1: + suffix = "-multioutput" # [[1, 2], [1, 2]] + else: + suffix = "" # [1, 2, 3] or [[1], [2], [3]] + + # Check float and contains non-integer float values + if xp.isdtype(y.dtype, "real floating"): + # [.1, .2, 3] or [[.1, .2, 3]] or [[1., .2]] and not [1., 2., 3.] + data = y.data if issparse(y) else y + if xp.any(data != xp.astype(data, int)): + _assert_all_finite(data, input_name=input_name) + return "continuous" + suffix + + # Check multiclass + if issparse(first_row): + first_row = first_row.data + if xp.unique_values(y).shape[0] > 2 or (y.ndim == 2 and len(first_row) > 1): + # [1, 2, 3] or [[1., 2., 3]] or [[1, 2]] + return "multiclass" + suffix + else: + return "binary" # [1, 2] or [["a"], ["b"]] + + +def _check_partial_fit_first_call(clf, classes=None): + """Private helper function for factorizing common classes param logic. + + Estimators that implement the ``partial_fit`` API need to be provided with + the list of possible classes at the first call to partial_fit. + + Subsequent calls to partial_fit should check that ``classes`` is still + consistent with a previous value of ``clf.classes_`` when provided. + + This function returns True if it detects that this was the first call to + ``partial_fit`` on ``clf``. In that case the ``classes_`` attribute is also + set on ``clf``. + + """ + if getattr(clf, "classes_", None) is None and classes is None: + raise ValueError("classes must be passed on the first call to partial_fit.") + + elif classes is not None: + if getattr(clf, "classes_", None) is not None: + if not np.array_equal(clf.classes_, unique_labels(classes)): + raise ValueError( + "`classes=%r` is not the same as on last call " + "to partial_fit, was: %r" % (classes, clf.classes_) + ) + + else: + # This is the first call to partial_fit + clf.classes_ = unique_labels(classes) + return True + + # classes is None and clf.classes_ has already previously been set: + # nothing to do + return False + + +def class_distribution(y, sample_weight=None): + """Compute class priors from multioutput-multiclass target data. + + Parameters + ---------- + y : {array-like, sparse matrix} of size (n_samples, n_outputs) + The labels for each example. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. + + Returns + ------- + classes : list of size n_outputs of ndarray of size (n_classes,) + List of classes for each column. + + n_classes : list of int of size n_outputs + Number of classes in each column. + + class_prior : list of size n_outputs of ndarray of size (n_classes,) + Class distribution of each column. 
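`_check_partial_fit_first_call` above encapsulates the `classes` bookkeeping that incremental estimators need: it sets `classes_` and returns `True` on the first call, and on later calls it only verifies consistency. The toy skeleton below shows the intended calling pattern; the estimator is made up purely for illustration (it just counts labels) and does not correspond to any real scikit-learn estimator.

import numpy as np
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.utils.multiclass import _check_partial_fit_first_call


class ToyPartialFitClassifier(ClassifierMixin, BaseEstimator):
    """Illustrative skeleton only: tracks per-class counts incrementally."""

    def partial_fit(self, X, y, classes=None):
        if _check_partial_fit_first_call(self, classes):
            # First call: classes_ has just been set from `classes`.
            self.counts_ = np.zeros(len(self.classes_), dtype=np.int64)
        for label in y:
            self.counts_[np.searchsorted(self.classes_, label)] += 1
        return self


clf = ToyPartialFitClassifier()
clf.partial_fit(np.zeros((3, 2)), [0, 1, 1], classes=[0, 1, 2])
clf.partial_fit(np.zeros((2, 2)), [2, 2])  # later calls may omit `classes`
print(clf.classes_, clf.counts_)           # [0 1 2] [1 2 2]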
+ """ + classes = [] + n_classes = [] + class_prior = [] + + n_samples, n_outputs = y.shape + if sample_weight is not None: + sample_weight = np.asarray(sample_weight) + + if issparse(y): + y = y.tocsc() + y_nnz = np.diff(y.indptr) + + for k in range(n_outputs): + col_nonzero = y.indices[y.indptr[k] : y.indptr[k + 1]] + # separate sample weights for zero and non-zero elements + if sample_weight is not None: + nz_samp_weight = sample_weight[col_nonzero] + zeros_samp_weight_sum = np.sum(sample_weight) - np.sum(nz_samp_weight) + else: + nz_samp_weight = None + zeros_samp_weight_sum = y.shape[0] - y_nnz[k] + + classes_k, y_k = np.unique( + y.data[y.indptr[k] : y.indptr[k + 1]], return_inverse=True + ) + class_prior_k = np.bincount(y_k, weights=nz_samp_weight) + + # An explicit zero was found, combine its weight with the weight + # of the implicit zeros + if 0 in classes_k: + class_prior_k[classes_k == 0] += zeros_samp_weight_sum + + # If an there is an implicit zero and it is not in classes and + # class_prior, make an entry for it + if 0 not in classes_k and y_nnz[k] < y.shape[0]: + classes_k = np.insert(classes_k, 0, 0) + class_prior_k = np.insert(class_prior_k, 0, zeros_samp_weight_sum) + + classes.append(classes_k) + n_classes.append(classes_k.shape[0]) + class_prior.append(class_prior_k / class_prior_k.sum()) + else: + for k in range(n_outputs): + classes_k, y_k = np.unique(y[:, k], return_inverse=True) + classes.append(classes_k) + n_classes.append(classes_k.shape[0]) + class_prior_k = np.bincount(y_k, weights=sample_weight) + class_prior.append(class_prior_k / class_prior_k.sum()) + + return (classes, n_classes, class_prior) + + +def _ovr_decision_function(predictions, confidences, n_classes): + """Compute a continuous, tie-breaking OvR decision function from OvO. + + It is important to include a continuous value, not only votes, + to make computing AUC or calibration meaningful. + + Parameters + ---------- + predictions : array-like of shape (n_samples, n_classifiers) + Predicted classes for each binary classifier. + + confidences : array-like of shape (n_samples, n_classifiers) + Decision functions or predicted probabilities for positive class + for each binary classifier. + + n_classes : int + Number of classes. n_classifiers must be + ``n_classes * (n_classes - 1 ) / 2``. + """ + n_samples = predictions.shape[0] + votes = np.zeros((n_samples, n_classes)) + sum_of_confidences = np.zeros((n_samples, n_classes)) + + k = 0 + for i in range(n_classes): + for j in range(i + 1, n_classes): + sum_of_confidences[:, i] -= confidences[:, k] + sum_of_confidences[:, j] += confidences[:, k] + votes[predictions[:, k] == 0, i] += 1 + votes[predictions[:, k] == 1, j] += 1 + k += 1 + + # Monotonically transform the sum_of_confidences to (-1/3, 1/3) + # and add it with votes. The monotonic transformation is + # f: x -> x / (3 * (|x| + 1)), it uses 1/3 instead of 1/2 + # to ensure that we won't reach the limits and change vote order. + # The motivation is to use confidence levels as a way to break ties in + # the votes without switching any decision made based on a difference + # of 1 vote. 
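+    # Illustration with made-up numbers: for any finite x, |x / (3 * (|x| + 1))|
+    # stays strictly below 1/3 (e.g. x = 8 gives 8 / 27, about 0.296), so two
+    # classes whose vote counts differ by at least 1 can never swap order once
+    # the transformed confidences are added: their scores shift by less than 2/3.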
+ transformed_confidences = sum_of_confidences / ( + 3 * (np.abs(sum_of_confidences) + 1) + ) + return votes + transformed_confidences diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/murmurhash.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/murmurhash.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..dbc6e5f409b173d45084a36fc9feea9cf169df2e Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/murmurhash.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/murmurhash.pxd b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/murmurhash.pxd new file mode 100644 index 0000000000000000000000000000000000000000..1844be154b39de7d1ec0b069df6bdc2ecb1fa13b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/murmurhash.pxd @@ -0,0 +1,21 @@ +"""Export fast murmurhash C/C++ routines + cython wrappers""" + +cimport numpy as cnp + +# The C API is disabled for now, since it requires -I flags to get +# compilation to work even when these functions are not used. +# cdef extern from "MurmurHash3.h": +# void MurmurHash3_x86_32(void* key, int len, unsigned int seed, +# void* out) +# +# void MurmurHash3_x86_128(void* key, int len, unsigned int seed, +# void* out) +# +# void MurmurHash3_x64_128(void* key, int len, unsigned int seed, +# void* out) + + +cpdef cnp.uint32_t murmurhash3_int_u32(int key, unsigned int seed) +cpdef cnp.int32_t murmurhash3_int_s32(int key, unsigned int seed) +cpdef cnp.uint32_t murmurhash3_bytes_u32(bytes key, unsigned int seed) +cpdef cnp.int32_t murmurhash3_bytes_s32(bytes key, unsigned int seed) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/parallel.py b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/parallel.py new file mode 100644 index 0000000000000000000000000000000000000000..d0dc2ec2be030e341b90843122c6109e02462ad8 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/parallel.py @@ -0,0 +1,129 @@ +""" +The :mod:`sklearn.utils.parallel` customizes `joblib` tools for scikit-learn usage. +""" + +import functools +import warnings +from functools import update_wrapper + +import joblib + +from .._config import config_context, get_config + + +def _with_config(delayed_func, config): + """Helper function that intends to attach a config to a delayed function.""" + if hasattr(delayed_func, "with_config"): + return delayed_func.with_config(config) + else: + warnings.warn( + ( + "`sklearn.utils.parallel.Parallel` needs to be used in " + "conjunction with `sklearn.utils.parallel.delayed` instead of " + "`joblib.delayed` to correctly propagate the scikit-learn " + "configuration to the joblib workers." + ), + UserWarning, + ) + return delayed_func + + +class Parallel(joblib.Parallel): + """Tweak of :class:`joblib.Parallel` that propagates the scikit-learn configuration. + + This subclass of :class:`joblib.Parallel` ensures that the active configuration + (thread-local) of scikit-learn is propagated to the parallel workers for the + duration of the execution of the parallel tasks. + + The API does not change and you can refer to :class:`joblib.Parallel` + documentation for more details. + + .. versionadded:: 1.3 + """ + + def __call__(self, iterable): + """Dispatch the tasks and return the results. 
+ + Parameters + ---------- + iterable : iterable + Iterable containing tuples of (delayed_function, args, kwargs) that should + be consumed. + + Returns + ------- + results : list + List of results of the tasks. + """ + # Capture the thread-local scikit-learn configuration at the time + # Parallel.__call__ is issued since the tasks can be dispatched + # in a different thread depending on the backend and on the value of + # pre_dispatch and n_jobs. + config = get_config() + iterable_with_config = ( + (_with_config(delayed_func, config), args, kwargs) + for delayed_func, args, kwargs in iterable + ) + return super().__call__(iterable_with_config) + + +# remove when https://github.com/joblib/joblib/issues/1071 is fixed +def delayed(function): + """Decorator used to capture the arguments of a function. + + This alternative to `joblib.delayed` is meant to be used in conjunction + with `sklearn.utils.parallel.Parallel`. The latter captures the scikit- + learn configuration by calling `sklearn.get_config()` in the current + thread, prior to dispatching the first task. The captured configuration is + then propagated and enabled for the duration of the execution of the + delayed function in the joblib workers. + + .. versionchanged:: 1.3 + `delayed` was moved from `sklearn.utils.fixes` to `sklearn.utils.parallel` + in scikit-learn 1.3. + + Parameters + ---------- + function : callable + The function to be delayed. + + Returns + ------- + output: tuple + Tuple containing the delayed function, the positional arguments, and the + keyword arguments. + """ + + @functools.wraps(function) + def delayed_function(*args, **kwargs): + return _FuncWrapper(function), args, kwargs + + return delayed_function + + +class _FuncWrapper: + """Load the global configuration before calling the function.""" + + def __init__(self, function): + self.function = function + update_wrapper(self, self.function) + + def with_config(self, config): + self.config = config + return self + + def __call__(self, *args, **kwargs): + config = getattr(self, "config", None) + if config is None: + warnings.warn( + ( + "`sklearn.utils.parallel.delayed` should be used with" + " `sklearn.utils.parallel.Parallel` to make it possible to" + " propagate the scikit-learn configuration of the current thread to" + " the joblib workers." + ), + UserWarning, + ) + config = {} + with config_context(**config): + return self.function(*args, **kwargs) diff --git a/env-llmeval/lib/python3.10/site-packages/sklearn/utils/sparsefuncs_fast.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/sparsefuncs_fast.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..47afe02937a1f316299331a4138f2add8702eb66 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sklearn/utils/sparsefuncs_fast.cpython-310-x86_64-linux-gnu.so differ
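To see the configuration propagation end to end, here is a short usage sketch combining `Parallel` and `delayed` from this module with `config_context`; the flag, the worker count and the toy task are arbitrary, and `get_config()` inside the worker reflects the configuration captured when the parallel call was dispatched.

import numpy as np
from sklearn import config_context, get_config
from sklearn.utils.parallel import Parallel, delayed


def report_assume_finite(x):
    # Runs in a joblib worker and sees the propagated scikit-learn config.
    return get_config()["assume_finite"], float(np.sum(x))


data = [np.ones(3), np.arange(3.0)]
with config_context(assume_finite=True):
    results = Parallel(n_jobs=2)(delayed(report_assume_finite)(x) for x in data)

print(results)  # [(True, 3.0), (True, 3.0)]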