diff --git a/ckpts/universal/global_step40/zero/13.attention.query_key_value.weight/exp_avg_sq.pt b/ckpts/universal/global_step40/zero/13.attention.query_key_value.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..f984960726338bcbd259779410bd901d1da631dc --- /dev/null +++ b/ckpts/universal/global_step40/zero/13.attention.query_key_value.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9f7c5f90dcc736b10c9a860fee444b18b8a80b0c0e1070f7d4664b26c43b687d +size 50332843 diff --git a/ckpts/universal/global_step40/zero/19.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt b/ckpts/universal/global_step40/zero/19.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..47878526c426a5cd2bc62c5f4fd9bcae02f2e7dd --- /dev/null +++ b/ckpts/universal/global_step40/zero/19.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bee4537af3d84a16470ecce9a9f4fd1fe0ac6bc730c97111f07370d4f811d2b1 +size 33555612 diff --git a/ckpts/universal/global_step40/zero/19.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt b/ckpts/universal/global_step40/zero/19.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..5b976d31231a8704bee6566b5c8c5285468e813c --- /dev/null +++ b/ckpts/universal/global_step40/zero/19.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:39bf520e527c0e2a618541079856e71dd96119a642808df2dd28f35172dcdd4f +size 33555627 diff --git a/ckpts/universal/global_step40/zero/7.post_attention_layernorm.weight/exp_avg.pt b/ckpts/universal/global_step40/zero/7.post_attention_layernorm.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..c653783a7a16e901f41de27f0f601d636b128388 --- /dev/null +++ b/ckpts/universal/global_step40/zero/7.post_attention_layernorm.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:df0904ae0d573f831a319d36df66437abfde9e930925b37b49cc55f33c9121fb +size 9372 diff --git a/ckpts/universal/global_step40/zero/7.post_attention_layernorm.weight/exp_avg_sq.pt b/ckpts/universal/global_step40/zero/7.post_attention_layernorm.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..38837ff685af9bfd6530a04a9f574039dfb4bca7 --- /dev/null +++ b/ckpts/universal/global_step40/zero/7.post_attention_layernorm.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:848b426a17e8a254353595bfa2925cc39bc4e6ac59d7b00e636a81177b08d1bd +size 9387 diff --git a/ckpts/universal/global_step40/zero/7.post_attention_layernorm.weight/fp32.pt b/ckpts/universal/global_step40/zero/7.post_attention_layernorm.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..58c18e50786a47beb94ca563c99d466090a0f3e7 --- /dev/null +++ b/ckpts/universal/global_step40/zero/7.post_attention_layernorm.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9446fed7682f41a6b4b537b5730b03286a34116c71ecf4b2f26b6fcdb4d901fb +size 9293 diff --git a/venv/lib/python3.10/site-packages/sklearn/inspection/__init__.py b/venv/lib/python3.10/site-packages/sklearn/inspection/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..f8e08785e8358bd039e8179368db28483be2cd55 --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/sklearn/inspection/__init__.py @@ -0,0 +1,14 @@ +"""The :mod:`sklearn.inspection` module includes tools for model inspection.""" + + +from ._partial_dependence import partial_dependence +from ._permutation_importance import permutation_importance +from ._plot.decision_boundary import DecisionBoundaryDisplay +from ._plot.partial_dependence import PartialDependenceDisplay + +__all__ = [ + "partial_dependence", + "permutation_importance", + "PartialDependenceDisplay", + "DecisionBoundaryDisplay", +] diff --git a/venv/lib/python3.10/site-packages/sklearn/inspection/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/inspection/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ac33fd95fa8f8bf48f64e39389da621dcadb1565 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/inspection/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/inspection/_partial_dependence.py b/venv/lib/python3.10/site-packages/sklearn/inspection/_partial_dependence.py new file mode 100644 index 0000000000000000000000000000000000000000..4ad6094e02478a3dd579537b79355679a1a335f4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/inspection/_partial_dependence.py @@ -0,0 +1,743 @@ +"""Partial dependence plots for regression and classification models.""" + +# Authors: Peter Prettenhofer +# Trevor Stephens +# Nicolas Hug +# License: BSD 3 clause + +from collections.abc import Iterable + +import numpy as np +from scipy import sparse +from scipy.stats.mstats import mquantiles + +from ..base import is_classifier, is_regressor +from ..ensemble import RandomForestRegressor +from ..ensemble._gb import BaseGradientBoosting +from ..ensemble._hist_gradient_boosting.gradient_boosting import ( + BaseHistGradientBoosting, +) +from ..exceptions import NotFittedError +from ..tree import DecisionTreeRegressor +from ..utils import ( + Bunch, + _determine_key_type, + _get_column_indices, + _safe_assign, + _safe_indexing, + check_array, + check_matplotlib_support, # noqa +) +from ..utils._param_validation import ( + HasMethods, + Integral, + Interval, + StrOptions, + validate_params, +) +from ..utils.extmath import cartesian +from ..utils.validation import _check_sample_weight, check_is_fitted +from ._pd_utils import _check_feature_names, _get_feature_index + +__all__ = [ + "partial_dependence", +] + + +def _grid_from_X(X, percentiles, is_categorical, grid_resolution): + """Generate a grid of points based on the percentiles of X. + + The grid is a cartesian product between the columns of ``values``. The + ith column of ``values`` consists in ``grid_resolution`` equally-spaced + points between the percentiles of the jth column of X. + + If ``grid_resolution`` is bigger than the number of unique values in the + j-th column of X or if the feature is a categorical feature (by inspecting + `is_categorical`) , then those unique values will be used instead. + + Parameters + ---------- + X : array-like of shape (n_samples, n_target_features) + The data. + + percentiles : tuple of float + The percentiles which are used to construct the extreme values of + the grid. Must be in [0, 1]. + + is_categorical : list of bool + For each feature, tells whether it is categorical or not. If a feature + is categorical, then the values used will be the unique ones + (i.e. categories) instead of the percentiles. 
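# Editor's sketch (not part of the original diff): a simplified, self-contained
# illustration of the grid construction described in this docstring, not the
# sklearn internals. Each feature axis is either the unique values (categorical
# or low-cardinality columns) or equally spaced points between two percentiles,
# and the grid is their cartesian product. The helper name `toy_grid_from_X` is
# hypothetical and assumes numeric columns for the percentile branch.
import numpy as np

def toy_grid_from_X(X, percentiles=(0.05, 0.95), is_categorical=None, grid_resolution=100):
    X = np.asarray(X)
    if is_categorical is None:
        is_categorical = [False] * X.shape[1]
    axes = []
    for j, is_cat in enumerate(is_categorical):
        uniques = np.unique(X[:, j])
        if is_cat or uniques.shape[0] < grid_resolution:
            # low-cardinality or categorical feature: use the unique values
            axes.append(uniques)
        else:
            # continuous feature: equally spaced points between the percentiles
            lo, hi = np.quantile(X[:, j], percentiles)
            axes.append(np.linspace(lo, hi, num=grid_resolution))
    # cartesian product of the per-feature axes
    mesh = np.meshgrid(*axes, indexing="ij")
    grid = np.column_stack([m.ravel() for m in mesh])
    return grid, axes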
+ + grid_resolution : int + The number of equally spaced points to be placed on the grid for each + feature. + + Returns + ------- + grid : ndarray of shape (n_points, n_target_features) + A value for each feature at each point in the grid. ``n_points`` is + always ``<= grid_resolution ** X.shape[1]``. + + values : list of 1d ndarrays + The values with which the grid has been created. The size of each + array ``values[j]`` is either ``grid_resolution``, or the number of + unique values in ``X[:, j]``, whichever is smaller. + """ + if not isinstance(percentiles, Iterable) or len(percentiles) != 2: + raise ValueError("'percentiles' must be a sequence of 2 elements.") + if not all(0 <= x <= 1 for x in percentiles): + raise ValueError("'percentiles' values must be in [0, 1].") + if percentiles[0] >= percentiles[1]: + raise ValueError("percentiles[0] must be strictly less than percentiles[1].") + + if grid_resolution <= 1: + raise ValueError("'grid_resolution' must be strictly greater than 1.") + + values = [] + # TODO: we should handle missing values (i.e. `np.nan`) specifically and store them + # in a different Bunch attribute. + for feature, is_cat in enumerate(is_categorical): + try: + uniques = np.unique(_safe_indexing(X, feature, axis=1)) + except TypeError as exc: + # `np.unique` will fail in the presence of `np.nan` and `str` categories + # due to sorting. Temporary, we reraise an error explaining the problem. + raise ValueError( + f"The column #{feature} contains mixed data types. Finding unique " + "categories fail due to sorting. It usually means that the column " + "contains `np.nan` values together with `str` categories. Such use " + "case is not yet supported in scikit-learn." + ) from exc + if is_cat or uniques.shape[0] < grid_resolution: + # Use the unique values either because: + # - feature has low resolution use unique values + # - feature is categorical + axis = uniques + else: + # create axis based on percentiles and grid resolution + emp_percentiles = mquantiles( + _safe_indexing(X, feature, axis=1), prob=percentiles, axis=0 + ) + if np.allclose(emp_percentiles[0], emp_percentiles[1]): + raise ValueError( + "percentiles are too close to each other, " + "unable to build the grid. Please choose percentiles " + "that are further apart." + ) + axis = np.linspace( + emp_percentiles[0], + emp_percentiles[1], + num=grid_resolution, + endpoint=True, + ) + values.append(axis) + + return cartesian(values), values + + +def _partial_dependence_recursion(est, grid, features): + """Calculate partial dependence via the recursion method. + + The recursion method is in particular enabled for tree-based estimators. + + For each `grid` value, a weighted tree traversal is performed: if a split node + involves an input feature of interest, the corresponding left or right branch + is followed; otherwise both branches are followed, each branch being weighted + by the fraction of training samples that entered that branch. Finally, the + partial dependence is given by a weighted average of all the visited leaves + values. + + This method is more efficient in terms of speed than the `'brute'` method + (:func:`~sklearn.inspection._partial_dependence._partial_dependence_brute`). + However, here, the partial dependence computation is done explicitly with the + `X` used during training of `est`. + + Parameters + ---------- + est : BaseEstimator + A fitted estimator object implementing :term:`predict` or + :term:`decision_function`. Multioutput-multiclass classifiers are not + supported. 
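# Editor's sketch (not part of the original diff): how this recursion path is
# reached through the public API on toy data. `method="recursion"` requires
# `kind="average"` and one of the tree-based estimators listed below; the data
# and variable names here are made up.
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.inspection import partial_dependence

X_toy = [[0.0, 1.0], [1.0, 0.0], [2.0, 3.0], [3.0, 2.0]]
y_toy = [0.0, 1.0, 2.0, 3.0]
est = GradientBoostingRegressor(random_state=0).fit(X_toy, y_toy)
pd_result = partial_dependence(est, X_toy, features=[0], method="recursion", kind="average")
print(pd_result["average"].shape)  # (1, n_grid_points)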
Note that `'recursion'` is only supported for some tree-based + estimators (namely + :class:`~sklearn.ensemble.GradientBoostingClassifier`, + :class:`~sklearn.ensemble.GradientBoostingRegressor`, + :class:`~sklearn.ensemble.HistGradientBoostingClassifier`, + :class:`~sklearn.ensemble.HistGradientBoostingRegressor`, + :class:`~sklearn.tree.DecisionTreeRegressor`, + :class:`~sklearn.ensemble.RandomForestRegressor`, + ). + + grid : array-like of shape (n_points, n_target_features) + The grid of feature values for which the partial dependence is calculated. + Note that `n_points` is the number of points in the grid and `n_target_features` + is the number of features you are doing partial dependence at. + + features : array-like of {int, str} + The feature (e.g. `[0]`) or pair of interacting features + (e.g. `[(0, 1)]`) for which the partial dependency should be computed. + + Returns + ------- + averaged_predictions : array-like of shape (n_targets, n_points) + The averaged predictions for the given `grid` of features values. + Note that `n_targets` is the number of targets (e.g. 1 for binary + classification, `n_tasks` for multi-output regression, and `n_classes` for + multiclass classification) and `n_points` is the number of points in the `grid`. + """ + averaged_predictions = est._compute_partial_dependence_recursion(grid, features) + if averaged_predictions.ndim == 1: + # reshape to (1, n_points) for consistency with + # _partial_dependence_brute + averaged_predictions = averaged_predictions.reshape(1, -1) + + return averaged_predictions + + +def _partial_dependence_brute( + est, grid, features, X, response_method, sample_weight=None +): + """Calculate partial dependence via the brute force method. + + The brute method explicitly averages the predictions of an estimator over a + grid of feature values. + + For each `grid` value, all the samples from `X` have their variables of + interest replaced by that specific `grid` value. The predictions are then made + and averaged across the samples. + + This method is slower than the `'recursion'` + (:func:`~sklearn.inspection._partial_dependence._partial_dependence_recursion`) + version for estimators with this second option. However, with the `'brute'` + force method, the average will be done with the given `X` and not the `X` + used during training, as it is done in the `'recursion'` version. Therefore + the average can always accept `sample_weight` (even when the estimator was + fitted without). + + Parameters + ---------- + est : BaseEstimator + A fitted estimator object implementing :term:`predict`, + :term:`predict_proba`, or :term:`decision_function`. + Multioutput-multiclass classifiers are not supported. + + grid : array-like of shape (n_points, n_target_features) + The grid of feature values for which the partial dependence is calculated. + Note that `n_points` is the number of points in the grid and `n_target_features` + is the number of features you are doing partial dependence at. + + features : array-like of {int, str} + The feature (e.g. `[0]`) or pair of interacting features + (e.g. `[(0, 1)]`) for which the partial dependency should be computed. + + X : array-like of shape (n_samples, n_features) + `X` is used to generate values for the complement features. That is, for + each value in `grid`, the method will average the prediction of each + sample from `X` having that grid value for `features`. 
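# Editor's sketch (not part of the original diff): the core idea of the brute
# method in a few lines of NumPy — for every grid value, overwrite the feature
# of interest in a copy of X, predict, and average over samples. Classifiers,
# multi-feature grids, and error handling are omitted; `toy_brute_pd` is a
# hypothetical helper, not the private function defined here.
import numpy as np

def toy_brute_pd(predict, X, feature_idx, grid_values, sample_weight=None):
    X_eval = np.array(X, dtype=float, copy=True)
    averaged = []
    for value in grid_values:
        # replace the feature of interest for every sample with the grid value
        X_eval[:, feature_idx] = value
        preds = predict(X_eval)
        # average the predictions over the samples (optionally weighted)
        averaged.append(np.average(preds, axis=0, weights=sample_weight))
    return np.asarray(averaged)  # shape (n_grid_points,) for a plain regressor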
+ + response_method : {'auto', 'predict_proba', 'decision_function'}, \ + default='auto' + Specifies whether to use :term:`predict_proba` or + :term:`decision_function` as the target response. For regressors + this parameter is ignored and the response is always the output of + :term:`predict`. By default, :term:`predict_proba` is tried first + and we revert to :term:`decision_function` if it doesn't exist. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights are used to calculate weighted means when averaging the + model output. If `None`, then samples are equally weighted. Note that + `sample_weight` does not change the individual predictions. + + Returns + ------- + averaged_predictions : array-like of shape (n_targets, n_points) + The averaged predictions for the given `grid` of features values. + Note that `n_targets` is the number of targets (e.g. 1 for binary + classification, `n_tasks` for multi-output regression, and `n_classes` for + multiclass classification) and `n_points` is the number of points in the `grid`. + + predictions : array-like + The predictions for the given `grid` of features values over the samples + from `X`. For non-multioutput regression and binary classification the + shape is `(n_instances, n_points)` and for multi-output regression and + multiclass classification the shape is `(n_targets, n_instances, n_points)`, + where `n_targets` is the number of targets (`n_tasks` for multi-output + regression, and `n_classes` for multiclass classification), `n_instances` + is the number of instances in `X`, and `n_points` is the number of points + in the `grid`. + """ + predictions = [] + averaged_predictions = [] + + # define the prediction_method (predict, predict_proba, decision_function). + if is_regressor(est): + prediction_method = est.predict + else: + predict_proba = getattr(est, "predict_proba", None) + decision_function = getattr(est, "decision_function", None) + if response_method == "auto": + # try predict_proba, then decision_function if it doesn't exist + prediction_method = predict_proba or decision_function + else: + prediction_method = ( + predict_proba + if response_method == "predict_proba" + else decision_function + ) + if prediction_method is None: + if response_method == "auto": + raise ValueError( + "The estimator has no predict_proba and no " + "decision_function method." 
+ ) + elif response_method == "predict_proba": + raise ValueError("The estimator has no predict_proba method.") + else: + raise ValueError("The estimator has no decision_function method.") + + X_eval = X.copy() + for new_values in grid: + for i, variable in enumerate(features): + _safe_assign(X_eval, new_values[i], column_indexer=variable) + + try: + # Note: predictions is of shape + # (n_points,) for non-multioutput regressors + # (n_points, n_tasks) for multioutput regressors + # (n_points, 1) for the regressors in cross_decomposition (I think) + # (n_points, 2) for binary classification + # (n_points, n_classes) for multiclass classification + pred = prediction_method(X_eval) + + predictions.append(pred) + # average over samples + averaged_predictions.append(np.average(pred, axis=0, weights=sample_weight)) + except NotFittedError as e: + raise ValueError("'estimator' parameter must be a fitted estimator") from e + + n_samples = X.shape[0] + + # reshape to (n_targets, n_instances, n_points) where n_targets is: + # - 1 for non-multioutput regression and binary classification (shape is + # already correct in those cases) + # - n_tasks for multi-output regression + # - n_classes for multiclass classification. + predictions = np.array(predictions).T + if is_regressor(est) and predictions.ndim == 2: + # non-multioutput regression, shape is (n_instances, n_points,) + predictions = predictions.reshape(n_samples, -1) + elif is_classifier(est) and predictions.shape[0] == 2: + # Binary classification, shape is (2, n_instances, n_points). + # we output the effect of **positive** class + predictions = predictions[1] + predictions = predictions.reshape(n_samples, -1) + + # reshape averaged_predictions to (n_targets, n_points) where n_targets is: + # - 1 for non-multioutput regression and binary classification (shape is + # already correct in those cases) + # - n_tasks for multi-output regression + # - n_classes for multiclass classification. + averaged_predictions = np.array(averaged_predictions).T + if is_regressor(est) and averaged_predictions.ndim == 1: + # non-multioutput regression, shape is (n_points,) + averaged_predictions = averaged_predictions.reshape(1, -1) + elif is_classifier(est) and averaged_predictions.shape[0] == 2: + # Binary classification, shape is (2, n_points). + # we output the effect of **positive** class + averaged_predictions = averaged_predictions[1] + averaged_predictions = averaged_predictions.reshape(1, -1) + + return averaged_predictions, predictions + + +@validate_params( + { + "estimator": [ + HasMethods(["fit", "predict"]), + HasMethods(["fit", "predict_proba"]), + HasMethods(["fit", "decision_function"]), + ], + "X": ["array-like", "sparse matrix"], + "features": ["array-like", Integral, str], + "sample_weight": ["array-like", None], + "categorical_features": ["array-like", None], + "feature_names": ["array-like", None], + "response_method": [StrOptions({"auto", "predict_proba", "decision_function"})], + "percentiles": [tuple], + "grid_resolution": [Interval(Integral, 1, None, closed="left")], + "method": [StrOptions({"auto", "recursion", "brute"})], + "kind": [StrOptions({"average", "individual", "both"})], + }, + prefer_skip_nested_validation=True, +) +def partial_dependence( + estimator, + X, + features, + *, + sample_weight=None, + categorical_features=None, + feature_names=None, + response_method="auto", + percentiles=(0.05, 0.95), + grid_resolution=100, + method="auto", + kind="average", +): + """Partial dependence of ``features``. 
+ + Partial dependence of a feature (or a set of features) corresponds to + the average response of an estimator for each possible value of the + feature. + + Read more in the :ref:`User Guide `. + + .. warning:: + + For :class:`~sklearn.ensemble.GradientBoostingClassifier` and + :class:`~sklearn.ensemble.GradientBoostingRegressor`, the + `'recursion'` method (used by default) will not account for the `init` + predictor of the boosting process. In practice, this will produce + the same values as `'brute'` up to a constant offset in the target + response, provided that `init` is a constant estimator (which is the + default). However, if `init` is not a constant estimator, the + partial dependence values are incorrect for `'recursion'` because the + offset will be sample-dependent. It is preferable to use the `'brute'` + method. Note that this only applies to + :class:`~sklearn.ensemble.GradientBoostingClassifier` and + :class:`~sklearn.ensemble.GradientBoostingRegressor`, not to + :class:`~sklearn.ensemble.HistGradientBoostingClassifier` and + :class:`~sklearn.ensemble.HistGradientBoostingRegressor`. + + Parameters + ---------- + estimator : BaseEstimator + A fitted estimator object implementing :term:`predict`, + :term:`predict_proba`, or :term:`decision_function`. + Multioutput-multiclass classifiers are not supported. + + X : {array-like, sparse matrix or dataframe} of shape (n_samples, n_features) + ``X`` is used to generate a grid of values for the target + ``features`` (where the partial dependence will be evaluated), and + also to generate values for the complement features when the + `method` is 'brute'. + + features : array-like of {int, str, bool} or int or str + The feature (e.g. `[0]`) or pair of interacting features + (e.g. `[(0, 1)]`) for which the partial dependency should be computed. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights are used to calculate weighted means when averaging the + model output. If `None`, then samples are equally weighted. If + `sample_weight` is not `None`, then `method` will be set to `'brute'`. + Note that `sample_weight` is ignored for `kind='individual'`. + + .. versionadded:: 1.3 + + categorical_features : array-like of shape (n_features,) or shape \ + (n_categorical_features,), dtype={bool, int, str}, default=None + Indicates the categorical features. + + - `None`: no feature will be considered categorical; + - boolean array-like: boolean mask of shape `(n_features,)` + indicating which features are categorical. Thus, this array has + the same shape has `X.shape[1]`; + - integer or string array-like: integer indices or strings + indicating categorical features. + + .. versionadded:: 1.2 + + feature_names : array-like of shape (n_features,), dtype=str, default=None + Name of each feature; `feature_names[i]` holds the name of the feature + with index `i`. + By default, the name of the feature corresponds to their numerical + index for NumPy array and their column name for pandas dataframe. + + .. versionadded:: 1.2 + + response_method : {'auto', 'predict_proba', 'decision_function'}, \ + default='auto' + Specifies whether to use :term:`predict_proba` or + :term:`decision_function` as the target response. For regressors + this parameter is ignored and the response is always the output of + :term:`predict`. By default, :term:`predict_proba` is tried first + and we revert to :term:`decision_function` if it doesn't exist. 
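# Editor's sketch (not part of the original diff): calling the public function
# on a pandas DataFrame, so feature names come from the columns and string
# feature identifiers can be used. Toy data; column names are made up. For this
# classifier, response_method="auto" resolves to predict_proba as described above.
import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.inspection import partial_dependence

df = pd.DataFrame({"age": [10.0, 20.0, 30.0, 40.0], "height": [1.0, 1.2, 1.5, 1.8]})
y = [0, 0, 1, 1]
clf = LogisticRegression().fit(df, y)
res = partial_dependence(clf, df, features=["age"], kind="average", grid_resolution=5)
print(res["average"].shape, len(res["grid_values"]))  # one output, one grid axis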
If + ``method`` is 'recursion', the response is always the output of + :term:`decision_function`. + + percentiles : tuple of float, default=(0.05, 0.95) + The lower and upper percentile used to create the extreme values + for the grid. Must be in [0, 1]. + + grid_resolution : int, default=100 + The number of equally spaced points on the grid, for each target + feature. + + method : {'auto', 'recursion', 'brute'}, default='auto' + The method used to calculate the averaged predictions: + + - `'recursion'` is only supported for some tree-based estimators + (namely + :class:`~sklearn.ensemble.GradientBoostingClassifier`, + :class:`~sklearn.ensemble.GradientBoostingRegressor`, + :class:`~sklearn.ensemble.HistGradientBoostingClassifier`, + :class:`~sklearn.ensemble.HistGradientBoostingRegressor`, + :class:`~sklearn.tree.DecisionTreeRegressor`, + :class:`~sklearn.ensemble.RandomForestRegressor`, + ) when `kind='average'`. + This is more efficient in terms of speed. + With this method, the target response of a + classifier is always the decision function, not the predicted + probabilities. Since the `'recursion'` method implicitly computes + the average of the Individual Conditional Expectation (ICE) by + design, it is not compatible with ICE and thus `kind` must be + `'average'`. + + - `'brute'` is supported for any estimator, but is more + computationally intensive. + + - `'auto'`: the `'recursion'` is used for estimators that support it, + and `'brute'` is used otherwise. If `sample_weight` is not `None`, + then `'brute'` is used regardless of the estimator. + + Please see :ref:`this note ` for + differences between the `'brute'` and `'recursion'` method. + + kind : {'average', 'individual', 'both'}, default='average' + Whether to return the partial dependence averaged across all the + samples in the dataset or one value per sample or both. + See Returns below. + + Note that the fast `method='recursion'` option is only available for + `kind='average'` and `sample_weights=None`. Computing individual + dependencies and doing weighted averages requires using the slower + `method='brute'`. + + .. versionadded:: 0.24 + + Returns + ------- + predictions : :class:`~sklearn.utils.Bunch` + Dictionary-like object, with the following attributes. + + individual : ndarray of shape (n_outputs, n_instances, \ + len(values[0]), len(values[1]), ...) + The predictions for all the points in the grid for all + samples in X. This is also known as Individual + Conditional Expectation (ICE). + Only available when `kind='individual'` or `kind='both'`. + + average : ndarray of shape (n_outputs, len(values[0]), \ + len(values[1]), ...) + The predictions for all the points in the grid, averaged + over all samples in X (or over the training data if + `method` is 'recursion'). + Only available when `kind='average'` or `kind='both'`. + + values : seq of 1d ndarrays + The values with which the grid has been created. + + .. deprecated:: 1.3 + The key `values` has been deprecated in 1.3 and will be removed + in 1.5 in favor of `grid_values`. See `grid_values` for details + about the `values` attribute. + + grid_values : seq of 1d ndarrays + The values with which the grid has been created. The generated + grid is a cartesian product of the arrays in `grid_values` where + `len(grid_values) == len(features)`. The size of each array + `grid_values[j]` is either `grid_resolution`, or the number of + unique values in `X[:, j]`, whichever is smaller. + + .. 
versionadded:: 1.3 + + `n_outputs` corresponds to the number of classes in a multi-class + setting, or to the number of tasks for multi-output regression. + For classical regression and binary classification `n_outputs==1`. + `n_values_feature_j` corresponds to the size `grid_values[j]`. + + See Also + -------- + PartialDependenceDisplay.from_estimator : Plot Partial Dependence. + PartialDependenceDisplay : Partial Dependence visualization. + + Examples + -------- + >>> X = [[0, 0, 2], [1, 0, 0]] + >>> y = [0, 1] + >>> from sklearn.ensemble import GradientBoostingClassifier + >>> gb = GradientBoostingClassifier(random_state=0).fit(X, y) + >>> partial_dependence(gb, features=[0], X=X, percentiles=(0, 1), + ... grid_resolution=2) # doctest: +SKIP + (array([[-4.52..., 4.52...]]), [array([ 0., 1.])]) + """ + check_is_fitted(estimator) + + if not (is_classifier(estimator) or is_regressor(estimator)): + raise ValueError("'estimator' must be a fitted regressor or classifier.") + + if is_classifier(estimator) and isinstance(estimator.classes_[0], np.ndarray): + raise ValueError("Multiclass-multioutput estimators are not supported") + + # Use check_array only on lists and other non-array-likes / sparse. Do not + # convert DataFrame into a NumPy array. + if not (hasattr(X, "__array__") or sparse.issparse(X)): + X = check_array(X, force_all_finite="allow-nan", dtype=object) + + if is_regressor(estimator) and response_method != "auto": + raise ValueError( + "The response_method parameter is ignored for regressors and " + "must be 'auto'." + ) + + if kind != "average": + if method == "recursion": + raise ValueError( + "The 'recursion' method only applies when 'kind' is set to 'average'" + ) + method = "brute" + + if method == "recursion" and sample_weight is not None: + raise ValueError( + "The 'recursion' method can only be applied when sample_weight is None." + ) + + if method == "auto": + if sample_weight is not None: + method = "brute" + elif isinstance(estimator, BaseGradientBoosting) and estimator.init is None: + method = "recursion" + elif isinstance( + estimator, + (BaseHistGradientBoosting, DecisionTreeRegressor, RandomForestRegressor), + ): + method = "recursion" + else: + method = "brute" + + if method == "recursion": + if not isinstance( + estimator, + ( + BaseGradientBoosting, + BaseHistGradientBoosting, + DecisionTreeRegressor, + RandomForestRegressor, + ), + ): + supported_classes_recursion = ( + "GradientBoostingClassifier", + "GradientBoostingRegressor", + "HistGradientBoostingClassifier", + "HistGradientBoostingRegressor", + "HistGradientBoostingRegressor", + "DecisionTreeRegressor", + "RandomForestRegressor", + ) + raise ValueError( + "Only the following estimators support the 'recursion' " + "method: {}. Try using method='brute'.".format( + ", ".join(supported_classes_recursion) + ) + ) + if response_method == "auto": + response_method = "decision_function" + + if response_method != "decision_function": + raise ValueError( + "With the 'recursion' method, the response_method must be " + "'decision_function'. Got {}.".format(response_method) + ) + + if sample_weight is not None: + sample_weight = _check_sample_weight(sample_weight, X) + + if _determine_key_type(features, accept_slice=False) == "int": + # _get_column_indices() supports negative indexing. Here, we limit + # the indexing to be positive. 
The upper bound will be checked + # by _get_column_indices() + if np.any(np.less(features, 0)): + raise ValueError("all features must be in [0, {}]".format(X.shape[1] - 1)) + + features_indices = np.asarray( + _get_column_indices(X, features), dtype=np.int32, order="C" + ).ravel() + + feature_names = _check_feature_names(X, feature_names) + + n_features = X.shape[1] + if categorical_features is None: + is_categorical = [False] * len(features_indices) + else: + categorical_features = np.asarray(categorical_features) + if categorical_features.dtype.kind == "b": + # categorical features provided as a list of boolean + if categorical_features.size != n_features: + raise ValueError( + "When `categorical_features` is a boolean array-like, " + "the array should be of shape (n_features,). Got " + f"{categorical_features.size} elements while `X` contains " + f"{n_features} features." + ) + is_categorical = [categorical_features[idx] for idx in features_indices] + elif categorical_features.dtype.kind in ("i", "O", "U"): + # categorical features provided as a list of indices or feature names + categorical_features_idx = [ + _get_feature_index(cat, feature_names=feature_names) + for cat in categorical_features + ] + is_categorical = [ + idx in categorical_features_idx for idx in features_indices + ] + else: + raise ValueError( + "Expected `categorical_features` to be an array-like of boolean," + f" integer, or string. Got {categorical_features.dtype} instead." + ) + + grid, values = _grid_from_X( + _safe_indexing(X, features_indices, axis=1), + percentiles, + is_categorical, + grid_resolution, + ) + + if method == "brute": + averaged_predictions, predictions = _partial_dependence_brute( + estimator, grid, features_indices, X, response_method, sample_weight + ) + + # reshape predictions to + # (n_outputs, n_instances, n_values_feature_0, n_values_feature_1, ...) + predictions = predictions.reshape( + -1, X.shape[0], *[val.shape[0] for val in values] + ) + else: + averaged_predictions = _partial_dependence_recursion( + estimator, grid, features_indices + ) + + # reshape averaged_predictions to + # (n_outputs, n_values_feature_0, n_values_feature_1, ...) + averaged_predictions = averaged_predictions.reshape( + -1, *[val.shape[0] for val in values] + ) + pdp_results = Bunch() + + msg = ( + "Key: 'values', is deprecated in 1.3 and will be removed in 1.5. " + "Please use 'grid_values' instead." + ) + pdp_results._set_deprecated( + values, new_key="grid_values", deprecated_key="values", warning_message=msg + ) + + if kind == "average": + pdp_results["average"] = averaged_predictions + elif kind == "individual": + pdp_results["individual"] = predictions + else: # kind='both' + pdp_results["average"] = averaged_predictions + pdp_results["individual"] = predictions + + return pdp_results diff --git a/venv/lib/python3.10/site-packages/sklearn/inspection/_pd_utils.py b/venv/lib/python3.10/site-packages/sklearn/inspection/_pd_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..76f4d626fd53c3e669f29335e65e724e5e33e382 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/inspection/_pd_utils.py @@ -0,0 +1,64 @@ +def _check_feature_names(X, feature_names=None): + """Check feature names. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Input data. + + feature_names : None or array-like of shape (n_names,), dtype=str + Feature names to check or `None`. + + Returns + ------- + feature_names : list of str + Feature names validated. 
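# Editor's sketch (not part of the original diff): the default naming behaviour
# described here, in isolation — pandas column names are used when available,
# otherwise positional names "x0", "x1", ... are generated.
# `default_feature_names` is a hypothetical stand-in for the private helper.
def default_feature_names(X):
    if hasattr(X, "columns"):
        # pandas DataFrame: reuse the column names
        return X.columns.tolist()
    # NumPy array: generate numbered names
    return [f"x{i}" for i in range(X.shape[1])]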
If `feature_names` is `None`, then a list of + feature names is provided, i.e. the column names of a pandas dataframe + or a generic list of feature names (e.g. `["x0", "x1", ...]`) for a + NumPy array. + """ + if feature_names is None: + if hasattr(X, "columns") and hasattr(X.columns, "tolist"): + # get the column names for a pandas dataframe + feature_names = X.columns.tolist() + else: + # define a list of numbered indices for a numpy array + feature_names = [f"x{i}" for i in range(X.shape[1])] + elif hasattr(feature_names, "tolist"): + # convert numpy array or pandas index to a list + feature_names = feature_names.tolist() + if len(set(feature_names)) != len(feature_names): + raise ValueError("feature_names should not contain duplicates.") + + return feature_names + + +def _get_feature_index(fx, feature_names=None): + """Get feature index. + + Parameters + ---------- + fx : int or str + Feature index or name. + + feature_names : list of str, default=None + All feature names from which to search the indices. + + Returns + ------- + idx : int + Feature index. + """ + if isinstance(fx, str): + if feature_names is None: + raise ValueError( + f"Cannot plot partial dependence for feature {fx!r} since " + "the list of feature names was not provided, neither as " + "column names of a pandas data-frame nor via the feature_names " + "parameter." + ) + try: + return feature_names.index(fx) + except ValueError as e: + raise ValueError(f"Feature {fx!r} not in feature_names") from e + return fx diff --git a/venv/lib/python3.10/site-packages/sklearn/inspection/_permutation_importance.py b/venv/lib/python3.10/site-packages/sklearn/inspection/_permutation_importance.py new file mode 100644 index 0000000000000000000000000000000000000000..3d96acff9b91a52916b0a29ad45f8d86fad8a9e3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/inspection/_permutation_importance.py @@ -0,0 +1,317 @@ +"""Permutation importance for estimators.""" + +import numbers + +import numpy as np + +from ..ensemble._bagging import _generate_indices +from ..metrics import check_scoring, get_scorer_names +from ..metrics._scorer import _check_multimetric_scoring, _MultimetricScorer +from ..model_selection._validation import _aggregate_score_dicts +from ..utils import Bunch, _safe_indexing, check_array, check_random_state +from ..utils._param_validation import ( + HasMethods, + Integral, + Interval, + RealNotInt, + StrOptions, + validate_params, +) +from ..utils.parallel import Parallel, delayed + + +def _weights_scorer(scorer, estimator, X, y, sample_weight): + if sample_weight is not None: + return scorer(estimator, X, y, sample_weight=sample_weight) + return scorer(estimator, X, y) + + +def _calculate_permutation_scores( + estimator, + X, + y, + sample_weight, + col_idx, + random_state, + n_repeats, + scorer, + max_samples, +): + """Calculate score when `col_idx` is permuted.""" + random_state = check_random_state(random_state) + + # Work on a copy of X to ensure thread-safety in case of threading based + # parallelism. Furthermore, making a copy is also useful when the joblib + # backend is 'loky' (default) or the old 'multiprocessing': in those cases, + # if X is large it will be automatically be backed by a readonly memory map + # (memmap). X.copy() on the other hand is always guaranteed to return a + # writable data-structure whose columns can be shuffled inplace. 
+ if max_samples < X.shape[0]: + row_indices = _generate_indices( + random_state=random_state, + bootstrap=False, + n_population=X.shape[0], + n_samples=max_samples, + ) + X_permuted = _safe_indexing(X, row_indices, axis=0) + y = _safe_indexing(y, row_indices, axis=0) + if sample_weight is not None: + sample_weight = _safe_indexing(sample_weight, row_indices, axis=0) + else: + X_permuted = X.copy() + + scores = [] + shuffling_idx = np.arange(X_permuted.shape[0]) + for _ in range(n_repeats): + random_state.shuffle(shuffling_idx) + if hasattr(X_permuted, "iloc"): + col = X_permuted.iloc[shuffling_idx, col_idx] + col.index = X_permuted.index + X_permuted[X_permuted.columns[col_idx]] = col + else: + X_permuted[:, col_idx] = X_permuted[shuffling_idx, col_idx] + scores.append(_weights_scorer(scorer, estimator, X_permuted, y, sample_weight)) + + if isinstance(scores[0], dict): + scores = _aggregate_score_dicts(scores) + else: + scores = np.array(scores) + + return scores + + +def _create_importances_bunch(baseline_score, permuted_score): + """Compute the importances as the decrease in score. + + Parameters + ---------- + baseline_score : ndarray of shape (n_features,) + The baseline score without permutation. + permuted_score : ndarray of shape (n_features, n_repeats) + The permuted scores for the `n` repetitions. + + Returns + ------- + importances : :class:`~sklearn.utils.Bunch` + Dictionary-like object, with the following attributes. + importances_mean : ndarray, shape (n_features, ) + Mean of feature importance over `n_repeats`. + importances_std : ndarray, shape (n_features, ) + Standard deviation over `n_repeats`. + importances : ndarray, shape (n_features, n_repeats) + Raw permutation importance scores. + """ + importances = baseline_score - permuted_score + return Bunch( + importances_mean=np.mean(importances, axis=1), + importances_std=np.std(importances, axis=1), + importances=importances, + ) + + +@validate_params( + { + "estimator": [HasMethods(["fit"])], + "X": ["array-like"], + "y": ["array-like", None], + "scoring": [ + StrOptions(set(get_scorer_names())), + callable, + list, + tuple, + dict, + None, + ], + "n_repeats": [Interval(Integral, 1, None, closed="left")], + "n_jobs": [Integral, None], + "random_state": ["random_state"], + "sample_weight": ["array-like", None], + "max_samples": [ + Interval(Integral, 1, None, closed="left"), + Interval(RealNotInt, 0, 1, closed="right"), + ], + }, + prefer_skip_nested_validation=True, +) +def permutation_importance( + estimator, + X, + y, + *, + scoring=None, + n_repeats=5, + n_jobs=None, + random_state=None, + sample_weight=None, + max_samples=1.0, +): + """Permutation importance for feature evaluation [BRE]_. + + The :term:`estimator` is required to be a fitted estimator. `X` can be the + data set used to train the estimator or a hold-out set. The permutation + importance of a feature is calculated as follows. First, a baseline metric, + defined by :term:`scoring`, is evaluated on a (potentially different) + dataset defined by the `X`. Next, a feature column from the validation set + is permuted and the metric is evaluated again. The permutation importance + is defined to be the difference between the baseline metric and metric from + permutating the feature column. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + estimator : object + An estimator that has already been :term:`fitted` and is compatible + with :term:`scorer`. 
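# Editor's sketch (not part of the original diff): the core computation behind
# permutation importance in a few lines — shuffle one column, rescore, and
# report the drop relative to the baseline score. Subsampling, parallelism, and
# multi-metric scoring are omitted; `toy_permutation_importance` and the
# `score(estimator, X, y)` callable are hypothetical stand-ins.
import numpy as np

def toy_permutation_importance(estimator, X, y, score, n_repeats=5, seed=None):
    rng = np.random.default_rng(seed)
    X = np.asarray(X, dtype=float)
    baseline = score(estimator, X, y)
    drops = np.empty((X.shape[1], n_repeats))
    for j in range(X.shape[1]):
        for r in range(n_repeats):
            X_perm = X.copy()
            rng.shuffle(X_perm[:, j])  # permute a single column in place
            drops[j, r] = baseline - score(estimator, X_perm, y)
    # mean and standard deviation of the score drop per feature
    return drops.mean(axis=1), drops.std(axis=1)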
+ + X : ndarray or DataFrame, shape (n_samples, n_features) + Data on which permutation importance will be computed. + + y : array-like or None, shape (n_samples, ) or (n_samples, n_classes) + Targets for supervised or `None` for unsupervised. + + scoring : str, callable, list, tuple, or dict, default=None + Scorer to use. + If `scoring` represents a single score, one can use: + + - a single string (see :ref:`scoring_parameter`); + - a callable (see :ref:`scoring`) that returns a single value. + + If `scoring` represents multiple scores, one can use: + + - a list or tuple of unique strings; + - a callable returning a dictionary where the keys are the metric + names and the values are the metric scores; + - a dictionary with metric names as keys and callables a values. + + Passing multiple scores to `scoring` is more efficient than calling + `permutation_importance` for each of the scores as it reuses + predictions to avoid redundant computation. + + If None, the estimator's default scorer is used. + + n_repeats : int, default=5 + Number of times to permute a feature. + + n_jobs : int or None, default=None + Number of jobs to run in parallel. The computation is done by computing + permutation score for each columns and parallelized over the columns. + `None` means 1 unless in a :obj:`joblib.parallel_backend` context. + `-1` means using all processors. See :term:`Glossary ` + for more details. + + random_state : int, RandomState instance, default=None + Pseudo-random number generator to control the permutations of each + feature. + Pass an int to get reproducible results across function calls. + See :term:`Glossary `. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights used in scoring. + + .. versionadded:: 0.24 + + max_samples : int or float, default=1.0 + The number of samples to draw from X to compute feature importance + in each repeat (without replacement). + + - If int, then draw `max_samples` samples. + - If float, then draw `max_samples * X.shape[0]` samples. + - If `max_samples` is equal to `1.0` or `X.shape[0]`, all samples + will be used. + + While using this option may provide less accurate importance estimates, + it keeps the method tractable when evaluating feature importance on + large datasets. In combination with `n_repeats`, this allows to control + the computational speed vs statistical accuracy trade-off of this method. + + .. versionadded:: 1.0 + + Returns + ------- + result : :class:`~sklearn.utils.Bunch` or dict of such instances + Dictionary-like object, with the following attributes. + + importances_mean : ndarray of shape (n_features, ) + Mean of feature importance over `n_repeats`. + importances_std : ndarray of shape (n_features, ) + Standard deviation over `n_repeats`. + importances : ndarray of shape (n_features, n_repeats) + Raw permutation importance scores. + + If there are multiple scoring metrics in the scoring parameter + `result` is a dict with scorer names as keys (e.g. 'roc_auc') and + `Bunch` objects like above as values. + + References + ---------- + .. [BRE] :doi:`L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, + 2001. <10.1023/A:1010933404324>` + + Examples + -------- + >>> from sklearn.linear_model import LogisticRegression + >>> from sklearn.inspection import permutation_importance + >>> X = [[1, 9, 9],[1, 9, 9],[1, 9, 9], + ... [0, 9, 9],[0, 9, 9],[0, 9, 9]] + >>> y = [1, 1, 1, 0, 0, 0] + >>> clf = LogisticRegression().fit(X, y) + >>> result = permutation_importance(clf, X, y, n_repeats=10, + ... 
random_state=0) + >>> result.importances_mean + array([0.4666..., 0. , 0. ]) + >>> result.importances_std + array([0.2211..., 0. , 0. ]) + """ + if not hasattr(X, "iloc"): + X = check_array(X, force_all_finite="allow-nan", dtype=None) + + # Precompute random seed from the random state to be used + # to get a fresh independent RandomState instance for each + # parallel call to _calculate_permutation_scores, irrespective of + # the fact that variables are shared or not depending on the active + # joblib backend (sequential, thread-based or process-based). + random_state = check_random_state(random_state) + random_seed = random_state.randint(np.iinfo(np.int32).max + 1) + + if not isinstance(max_samples, numbers.Integral): + max_samples = int(max_samples * X.shape[0]) + elif max_samples > X.shape[0]: + raise ValueError("max_samples must be <= n_samples") + + if callable(scoring): + scorer = scoring + elif scoring is None or isinstance(scoring, str): + scorer = check_scoring(estimator, scoring=scoring) + else: + scorers_dict = _check_multimetric_scoring(estimator, scoring) + scorer = _MultimetricScorer(scorers=scorers_dict) + + baseline_score = _weights_scorer(scorer, estimator, X, y, sample_weight) + + scores = Parallel(n_jobs=n_jobs)( + delayed(_calculate_permutation_scores)( + estimator, + X, + y, + sample_weight, + col_idx, + random_seed, + n_repeats, + scorer, + max_samples, + ) + for col_idx in range(X.shape[1]) + ) + + if isinstance(baseline_score, dict): + return { + name: _create_importances_bunch( + baseline_score[name], + # unpack the permuted scores + np.array([scores[col_idx][name] for col_idx in range(X.shape[1])]), + ) + for name in baseline_score + } + else: + return _create_importances_bunch(baseline_score, np.array(scores)) diff --git a/venv/lib/python3.10/site-packages/sklearn/inspection/_plot/__init__.py b/venv/lib/python3.10/site-packages/sklearn/inspection/_plot/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/sklearn/inspection/_plot/decision_boundary.py b/venv/lib/python3.10/site-packages/sklearn/inspection/_plot/decision_boundary.py new file mode 100644 index 0000000000000000000000000000000000000000..12162b25c53ed4a588dbf476774fc22ba41ee49a --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/inspection/_plot/decision_boundary.py @@ -0,0 +1,406 @@ +import numpy as np + +from ...base import is_regressor +from ...preprocessing import LabelEncoder +from ...utils import _safe_indexing, check_matplotlib_support +from ...utils._response import _get_response_values +from ...utils.validation import ( + _is_arraylike_not_scalar, + _num_features, + check_is_fitted, +) + + +def _check_boundary_response_method(estimator, response_method, class_of_interest): + """Validate the response methods to be used with the fitted estimator. + + Parameters + ---------- + estimator : object + Fitted estimator to check. + + response_method : {'auto', 'predict_proba', 'decision_function', 'predict'} + Specifies whether to use :term:`predict_proba`, + :term:`decision_function`, :term:`predict` as the target response. + If set to 'auto', the response method is tried in the following order: + :term:`decision_function`, :term:`predict_proba`, :term:`predict`. + + class_of_interest : int, float, bool, str or None + The class considered when plotting the decision. If the label is specified, it + is then possible to plot the decision boundary in multiclass settings. + + .. 
versionadded:: 1.4 + + Returns + ------- + prediction_method : list of str or str + The name or list of names of the response methods to use. + """ + has_classes = hasattr(estimator, "classes_") + if has_classes and _is_arraylike_not_scalar(estimator.classes_[0]): + msg = "Multi-label and multi-output multi-class classifiers are not supported" + raise ValueError(msg) + + if has_classes and len(estimator.classes_) > 2: + if response_method not in {"auto", "predict"} and class_of_interest is None: + msg = ( + "Multiclass classifiers are only supported when `response_method` is " + "'predict' or 'auto'. Else you must provide `class_of_interest` to " + "plot the decision boundary of a specific class." + ) + raise ValueError(msg) + prediction_method = "predict" if response_method == "auto" else response_method + elif response_method == "auto": + if is_regressor(estimator): + prediction_method = "predict" + else: + prediction_method = ["decision_function", "predict_proba", "predict"] + else: + prediction_method = response_method + + return prediction_method + + +class DecisionBoundaryDisplay: + """Decisions boundary visualization. + + It is recommended to use + :func:`~sklearn.inspection.DecisionBoundaryDisplay.from_estimator` + to create a :class:`DecisionBoundaryDisplay`. All parameters are stored as + attributes. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 1.1 + + Parameters + ---------- + xx0 : ndarray of shape (grid_resolution, grid_resolution) + First output of :func:`meshgrid `. + + xx1 : ndarray of shape (grid_resolution, grid_resolution) + Second output of :func:`meshgrid `. + + response : ndarray of shape (grid_resolution, grid_resolution) + Values of the response function. + + xlabel : str, default=None + Default label to place on x axis. + + ylabel : str, default=None + Default label to place on y axis. + + Attributes + ---------- + surface_ : matplotlib `QuadContourSet` or `QuadMesh` + If `plot_method` is 'contour' or 'contourf', `surface_` is a + :class:`QuadContourSet `. If + `plot_method` is 'pcolormesh', `surface_` is a + :class:`QuadMesh `. + + ax_ : matplotlib Axes + Axes with decision boundary. + + figure_ : matplotlib Figure + Figure containing the decision boundary. + + See Also + -------- + DecisionBoundaryDisplay.from_estimator : Plot decision boundary given an estimator. + + Examples + -------- + >>> import matplotlib.pyplot as plt + >>> import numpy as np + >>> from sklearn.datasets import load_iris + >>> from sklearn.inspection import DecisionBoundaryDisplay + >>> from sklearn.tree import DecisionTreeClassifier + >>> iris = load_iris() + >>> feature_1, feature_2 = np.meshgrid( + ... np.linspace(iris.data[:, 0].min(), iris.data[:, 0].max()), + ... np.linspace(iris.data[:, 1].min(), iris.data[:, 1].max()) + ... ) + >>> grid = np.vstack([feature_1.ravel(), feature_2.ravel()]).T + >>> tree = DecisionTreeClassifier().fit(iris.data[:, :2], iris.target) + >>> y_pred = np.reshape(tree.predict(grid), feature_1.shape) + >>> display = DecisionBoundaryDisplay( + ... xx0=feature_1, xx1=feature_2, response=y_pred + ... ) + >>> display.plot() + <...> + >>> display.ax_.scatter( + ... iris.data[:, 0], iris.data[:, 1], c=iris.target, edgecolor="black" + ... 
) + <...> + >>> plt.show() + """ + + def __init__(self, *, xx0, xx1, response, xlabel=None, ylabel=None): + self.xx0 = xx0 + self.xx1 = xx1 + self.response = response + self.xlabel = xlabel + self.ylabel = ylabel + + def plot(self, plot_method="contourf", ax=None, xlabel=None, ylabel=None, **kwargs): + """Plot visualization. + + Parameters + ---------- + plot_method : {'contourf', 'contour', 'pcolormesh'}, default='contourf' + Plotting method to call when plotting the response. Please refer + to the following matplotlib documentation for details: + :func:`contourf `, + :func:`contour `, + :func:`pcolormesh `. + + ax : Matplotlib axes, default=None + Axes object to plot on. If `None`, a new figure and axes is + created. + + xlabel : str, default=None + Overwrite the x-axis label. + + ylabel : str, default=None + Overwrite the y-axis label. + + **kwargs : dict + Additional keyword arguments to be passed to the `plot_method`. + + Returns + ------- + display: :class:`~sklearn.inspection.DecisionBoundaryDisplay` + Object that stores computed values. + """ + check_matplotlib_support("DecisionBoundaryDisplay.plot") + import matplotlib.pyplot as plt # noqa + + if plot_method not in ("contourf", "contour", "pcolormesh"): + raise ValueError( + "plot_method must be 'contourf', 'contour', or 'pcolormesh'" + ) + + if ax is None: + _, ax = plt.subplots() + + plot_func = getattr(ax, plot_method) + self.surface_ = plot_func(self.xx0, self.xx1, self.response, **kwargs) + + if xlabel is not None or not ax.get_xlabel(): + xlabel = self.xlabel if xlabel is None else xlabel + ax.set_xlabel(xlabel) + if ylabel is not None or not ax.get_ylabel(): + ylabel = self.ylabel if ylabel is None else ylabel + ax.set_ylabel(ylabel) + + self.ax_ = ax + self.figure_ = ax.figure + return self + + @classmethod + def from_estimator( + cls, + estimator, + X, + *, + grid_resolution=100, + eps=1.0, + plot_method="contourf", + response_method="auto", + class_of_interest=None, + xlabel=None, + ylabel=None, + ax=None, + **kwargs, + ): + """Plot decision boundary given an estimator. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + estimator : object + Trained estimator used to plot the decision boundary. + + X : {array-like, sparse matrix, dataframe} of shape (n_samples, 2) + Input data that should be only 2-dimensional. + + grid_resolution : int, default=100 + Number of grid points to use for plotting decision boundary. + Higher values will make the plot look nicer but be slower to + render. + + eps : float, default=1.0 + Extends the minimum and maximum values of X for evaluating the + response function. + + plot_method : {'contourf', 'contour', 'pcolormesh'}, default='contourf' + Plotting method to call when plotting the response. Please refer + to the following matplotlib documentation for details: + :func:`contourf `, + :func:`contour `, + :func:`pcolormesh `. + + response_method : {'auto', 'predict_proba', 'decision_function', \ + 'predict'}, default='auto' + Specifies whether to use :term:`predict_proba`, + :term:`decision_function`, :term:`predict` as the target response. + If set to 'auto', the response method is tried in the following order: + :term:`decision_function`, :term:`predict_proba`, :term:`predict`. + For multiclass problems, :term:`predict` is selected when + `response_method="auto"`. + + class_of_interest : int, float, bool or str, default=None + The class considered when plotting the decision. If None, + `estimator.classes_[1]` is considered as the positive class + for binary classifiers. 
For multiclass classifiers, passing + an explicit value for `class_of_interest` is mandatory. + + .. versionadded:: 1.4 + + xlabel : str, default=None + The label used for the x-axis. If `None`, an attempt is made to + extract a label from `X` if it is a dataframe, otherwise an empty + string is used. + + ylabel : str, default=None + The label used for the y-axis. If `None`, an attempt is made to + extract a label from `X` if it is a dataframe, otherwise an empty + string is used. + + ax : Matplotlib axes, default=None + Axes object to plot on. If `None`, a new figure and axes is + created. + + **kwargs : dict + Additional keyword arguments to be passed to the + `plot_method`. + + Returns + ------- + display : :class:`~sklearn.inspection.DecisionBoundaryDisplay` + Object that stores the result. + + See Also + -------- + DecisionBoundaryDisplay : Decision boundary visualization. + sklearn.metrics.ConfusionMatrixDisplay.from_estimator : Plot the + confusion matrix given an estimator, the data, and the label. + sklearn.metrics.ConfusionMatrixDisplay.from_predictions : Plot the + confusion matrix given the true and predicted labels. + + Examples + -------- + >>> import matplotlib.pyplot as plt + >>> from sklearn.datasets import load_iris + >>> from sklearn.linear_model import LogisticRegression + >>> from sklearn.inspection import DecisionBoundaryDisplay + >>> iris = load_iris() + >>> X = iris.data[:, :2] + >>> classifier = LogisticRegression().fit(X, iris.target) + >>> disp = DecisionBoundaryDisplay.from_estimator( + ... classifier, X, response_method="predict", + ... xlabel=iris.feature_names[0], ylabel=iris.feature_names[1], + ... alpha=0.5, + ... ) + >>> disp.ax_.scatter(X[:, 0], X[:, 1], c=iris.target, edgecolor="k") + <...> + >>> plt.show() + """ + check_matplotlib_support(f"{cls.__name__}.from_estimator") + check_is_fitted(estimator) + + if not grid_resolution > 1: + raise ValueError( + "grid_resolution must be greater than 1. Got" + f" {grid_resolution} instead." + ) + + if not eps >= 0: + raise ValueError( + f"eps must be greater than or equal to 0. Got {eps} instead." + ) + + possible_plot_methods = ("contourf", "contour", "pcolormesh") + if plot_method not in possible_plot_methods: + available_methods = ", ".join(possible_plot_methods) + raise ValueError( + f"plot_method must be one of {available_methods}. " + f"Got {plot_method} instead." + ) + + num_features = _num_features(X) + if num_features != 2: + raise ValueError( + f"n_features must be equal to 2. Got {num_features} instead." 
+ ) + + x0, x1 = _safe_indexing(X, 0, axis=1), _safe_indexing(X, 1, axis=1) + + x0_min, x0_max = x0.min() - eps, x0.max() + eps + x1_min, x1_max = x1.min() - eps, x1.max() + eps + + xx0, xx1 = np.meshgrid( + np.linspace(x0_min, x0_max, grid_resolution), + np.linspace(x1_min, x1_max, grid_resolution), + ) + if hasattr(X, "iloc"): + # we need to preserve the feature names and therefore get an empty dataframe + X_grid = X.iloc[[], :].copy() + X_grid.iloc[:, 0] = xx0.ravel() + X_grid.iloc[:, 1] = xx1.ravel() + else: + X_grid = np.c_[xx0.ravel(), xx1.ravel()] + + prediction_method = _check_boundary_response_method( + estimator, response_method, class_of_interest + ) + try: + response, _, response_method_used = _get_response_values( + estimator, + X_grid, + response_method=prediction_method, + pos_label=class_of_interest, + return_response_method_used=True, + ) + except ValueError as exc: + if "is not a valid label" in str(exc): + # re-raise a more informative error message since `pos_label` is unknown + # to our user when interacting with + # `DecisionBoundaryDisplay.from_estimator` + raise ValueError( + f"class_of_interest={class_of_interest} is not a valid label: It " + f"should be one of {estimator.classes_}" + ) from exc + raise + + # convert classes predictions into integers + if response_method_used == "predict" and hasattr(estimator, "classes_"): + encoder = LabelEncoder() + encoder.classes_ = estimator.classes_ + response = encoder.transform(response) + + if response.ndim != 1: + if is_regressor(estimator): + raise ValueError("Multi-output regressors are not supported") + + # For the multiclass case, `_get_response_values` returns the response + # as-is. Thus, we have a column per class and we need to select the column + # corresponding to the positive class. + col_idx = np.flatnonzero(estimator.classes_ == class_of_interest)[0] + response = response[:, col_idx] + + if xlabel is None: + xlabel = X.columns[0] if hasattr(X, "columns") else "" + + if ylabel is None: + ylabel = X.columns[1] if hasattr(X, "columns") else "" + + display = cls( + xx0=xx0, + xx1=xx1, + response=response.reshape(xx0.shape), + xlabel=xlabel, + ylabel=ylabel, + ) + return display.plot(ax=ax, plot_method=plot_method, **kwargs) diff --git a/venv/lib/python3.10/site-packages/sklearn/inspection/_plot/partial_dependence.py b/venv/lib/python3.10/site-packages/sklearn/inspection/_plot/partial_dependence.py new file mode 100644 index 0000000000000000000000000000000000000000..078db1a3260007e50bc63edcf17e3d7811cacf2a --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/inspection/_plot/partial_dependence.py @@ -0,0 +1,1473 @@ +import numbers +from itertools import chain +from math import ceil + +import numpy as np +from scipy import sparse +from scipy.stats.mstats import mquantiles + +from ...base import is_regressor +from ...utils import ( + Bunch, + _safe_indexing, + check_array, + check_matplotlib_support, # noqa + check_random_state, +) +from ...utils._encode import _unique +from ...utils.parallel import Parallel, delayed +from .. import partial_dependence +from .._pd_utils import _check_feature_names, _get_feature_index + + +class PartialDependenceDisplay: + """Partial Dependence Plot (PDP). + + This can also display individual partial dependencies which are often + referred to as: Individual Condition Expectation (ICE). + + It is recommended to use + :func:`~sklearn.inspection.PartialDependenceDisplay.from_estimator` to create a + :class:`~sklearn.inspection.PartialDependenceDisplay`. 
All parameters are + stored as attributes. + + Read more in + :ref:`sphx_glr_auto_examples_miscellaneous_plot_partial_dependence_visualization_api.py` + and the :ref:`User Guide `. + + .. versionadded:: 0.22 + + Parameters + ---------- + pd_results : list of Bunch + Results of :func:`~sklearn.inspection.partial_dependence` for + ``features``. + + features : list of (int,) or list of (int, int) + Indices of features for a given plot. A tuple of one integer will plot + a partial dependence curve of one feature. A tuple of two integers will + plot a two-way partial dependence curve as a contour plot. + + feature_names : list of str + Feature names corresponding to the indices in ``features``. + + target_idx : int + + - In a multiclass setting, specifies the class for which the PDPs + should be computed. Note that for binary classification, the + positive class (index 1) is always used. + - In a multioutput setting, specifies the task for which the PDPs + should be computed. + + Ignored in binary classification or classical regression settings. + + deciles : dict + Deciles for feature indices in ``features``. + + kind : {'average', 'individual', 'both'} or list of such str, \ + default='average' + Whether to plot the partial dependence averaged across all the samples + in the dataset or one line per sample or both. + + - ``kind='average'`` results in the traditional PD plot; + - ``kind='individual'`` results in the ICE plot; + - ``kind='both'`` results in plotting both the ICE and PD on the same + plot. + + A list of such strings can be provided to specify `kind` on a per-plot + basis. The length of the list should be the same as the number of + interaction requested in `features`. + + .. note:: + ICE ('individual' or 'both') is not a valid option for 2-ways + interactions plot. As a result, an error will be raised. + 2-ways interaction plots should always be configured to + use the 'average' kind instead. + + .. note:: + The fast ``method='recursion'`` option is only available for + `kind='average'` and `sample_weights=None`. Computing individual + dependencies and doing weighted averages requires using the slower + `method='brute'`. + + .. versionadded:: 0.24 + Add `kind` parameter with `'average'`, `'individual'`, and `'both'` + options. + + .. versionadded:: 1.1 + Add the possibility to pass a list of string specifying `kind` + for each plot. + + subsample : float, int or None, default=1000 + Sampling for ICE curves when `kind` is 'individual' or 'both'. + If float, should be between 0.0 and 1.0 and represent the proportion + of the dataset to be used to plot ICE curves. If int, represents the + maximum absolute number of samples to use. + + Note that the full dataset is still used to calculate partial + dependence when `kind='both'`. + + .. versionadded:: 0.24 + + random_state : int, RandomState instance or None, default=None + Controls the randomness of the selected samples when subsamples is not + `None`. See :term:`Glossary ` for details. + + .. versionadded:: 0.24 + + is_categorical : list of (bool,) or list of (bool, bool), default=None + Whether each target feature in `features` is categorical or not. + The list should be same size as `features`. If `None`, all features + are assumed to be continuous. + + .. versionadded:: 1.2 + + Attributes + ---------- + bounding_ax_ : matplotlib Axes or None + If `ax` is an axes or None, the `bounding_ax_` is the axes where the + grid of partial dependence plots are drawn. 
If `ax` is a list of axes + or a numpy array of axes, `bounding_ax_` is None. + + axes_ : ndarray of matplotlib Axes + If `ax` is an axes or None, `axes_[i, j]` is the axes on the i-th row + and j-th column. If `ax` is a list of axes, `axes_[i]` is the i-th item + in `ax`. Elements that are None correspond to a nonexisting axes in + that position. + + lines_ : ndarray of matplotlib Artists + If `ax` is an axes or None, `lines_[i, j]` is the partial dependence + curve on the i-th row and j-th column. If `ax` is a list of axes, + `lines_[i]` is the partial dependence curve corresponding to the i-th + item in `ax`. Elements that are None correspond to a nonexisting axes + or an axes that does not include a line plot. + + deciles_vlines_ : ndarray of matplotlib LineCollection + If `ax` is an axes or None, `vlines_[i, j]` is the line collection + representing the x axis deciles of the i-th row and j-th column. If + `ax` is a list of axes, `vlines_[i]` corresponds to the i-th item in + `ax`. Elements that are None correspond to a nonexisting axes or an + axes that does not include a PDP plot. + + .. versionadded:: 0.23 + + deciles_hlines_ : ndarray of matplotlib LineCollection + If `ax` is an axes or None, `vlines_[i, j]` is the line collection + representing the y axis deciles of the i-th row and j-th column. If + `ax` is a list of axes, `vlines_[i]` corresponds to the i-th item in + `ax`. Elements that are None correspond to a nonexisting axes or an + axes that does not include a 2-way plot. + + .. versionadded:: 0.23 + + contours_ : ndarray of matplotlib Artists + If `ax` is an axes or None, `contours_[i, j]` is the partial dependence + plot on the i-th row and j-th column. If `ax` is a list of axes, + `contours_[i]` is the partial dependence plot corresponding to the i-th + item in `ax`. Elements that are None correspond to a nonexisting axes + or an axes that does not include a contour plot. + + bars_ : ndarray of matplotlib Artists + If `ax` is an axes or None, `bars_[i, j]` is the partial dependence bar + plot on the i-th row and j-th column (for a categorical feature). + If `ax` is a list of axes, `bars_[i]` is the partial dependence bar + plot corresponding to the i-th item in `ax`. Elements that are None + correspond to a nonexisting axes or an axes that does not include a + bar plot. + + .. versionadded:: 1.2 + + heatmaps_ : ndarray of matplotlib Artists + If `ax` is an axes or None, `heatmaps_[i, j]` is the partial dependence + heatmap on the i-th row and j-th column (for a pair of categorical + features) . If `ax` is a list of axes, `heatmaps_[i]` is the partial + dependence heatmap corresponding to the i-th item in `ax`. Elements + that are None correspond to a nonexisting axes or an axes that does not + include a heatmap. + + .. versionadded:: 1.2 + + figure_ : matplotlib Figure + Figure containing partial dependence plots. + + See Also + -------- + partial_dependence : Compute Partial Dependence values. + PartialDependenceDisplay.from_estimator : Plot Partial Dependence. 
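# Editorial sketch (not part of the vendored file): a minimal, hedged example of
# how the attribute grids documented above (`axes_`, `lines_`, `contours_`) can
# be inspected after plotting. Shapes assume three requested plots and n_cols=2.
import matplotlib.pyplot as plt
from sklearn.datasets import make_friedman1
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.inspection import PartialDependenceDisplay

X, y = make_friedman1(random_state=0)
est = GradientBoostingRegressor(n_estimators=10).fit(X, y)
disp = PartialDependenceDisplay.from_estimator(est, X, [0, 1, (0, 1)], n_cols=2)
print(disp.axes_.shape)      # (2, 2) grid of Axes; the unused cell stays None
print(disp.lines_.shape)     # same grid; one-way average curves live here
print(disp.contours_.shape)  # the (0, 1) pair stores its contour artist here
plt.close("all")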
+ + Examples + -------- + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> from sklearn.datasets import make_friedman1 + >>> from sklearn.ensemble import GradientBoostingRegressor + >>> from sklearn.inspection import PartialDependenceDisplay + >>> from sklearn.inspection import partial_dependence + >>> X, y = make_friedman1() + >>> clf = GradientBoostingRegressor(n_estimators=10).fit(X, y) + >>> features, feature_names = [(0,)], [f"Features #{i}" for i in range(X.shape[1])] + >>> deciles = {0: np.linspace(0, 1, num=5)} + >>> pd_results = partial_dependence( + ... clf, X, features=0, kind="average", grid_resolution=5) + >>> display = PartialDependenceDisplay( + ... [pd_results], features=features, feature_names=feature_names, + ... target_idx=0, deciles=deciles + ... ) + >>> display.plot(pdp_lim={1: (-1.38, 0.66)}) + <...> + >>> plt.show() + """ + + def __init__( + self, + pd_results, + *, + features, + feature_names, + target_idx, + deciles, + kind="average", + subsample=1000, + random_state=None, + is_categorical=None, + ): + self.pd_results = pd_results + self.features = features + self.feature_names = feature_names + self.target_idx = target_idx + self.deciles = deciles + self.kind = kind + self.subsample = subsample + self.random_state = random_state + self.is_categorical = is_categorical + + @classmethod + def from_estimator( + cls, + estimator, + X, + features, + *, + sample_weight=None, + categorical_features=None, + feature_names=None, + target=None, + response_method="auto", + n_cols=3, + grid_resolution=100, + percentiles=(0.05, 0.95), + method="auto", + n_jobs=None, + verbose=0, + line_kw=None, + ice_lines_kw=None, + pd_line_kw=None, + contour_kw=None, + ax=None, + kind="average", + centered=False, + subsample=1000, + random_state=None, + ): + """Partial dependence (PD) and individual conditional expectation (ICE) plots. + + Partial dependence plots, individual conditional expectation plots or an + overlay of both of them can be plotted by setting the ``kind`` + parameter. The ``len(features)`` plots are arranged in a grid with + ``n_cols`` columns. Two-way partial dependence plots are plotted as + contour plots. The deciles of the feature values will be shown with tick + marks on the x-axes for one-way plots, and on both axes for two-way + plots. + + Read more in the :ref:`User Guide `. + + .. note:: + + :func:`PartialDependenceDisplay.from_estimator` does not support using the + same axes with multiple calls. To plot the partial dependence for + multiple estimators, please pass the axes created by the first call to the + second call:: + + >>> from sklearn.inspection import PartialDependenceDisplay + >>> from sklearn.datasets import make_friedman1 + >>> from sklearn.linear_model import LinearRegression + >>> from sklearn.ensemble import RandomForestRegressor + >>> X, y = make_friedman1() + >>> est1 = LinearRegression().fit(X, y) + >>> est2 = RandomForestRegressor().fit(X, y) + >>> disp1 = PartialDependenceDisplay.from_estimator(est1, X, + ... [1, 2]) + >>> disp2 = PartialDependenceDisplay.from_estimator(est2, X, [1, 2], + ... ax=disp1.axes_) + + .. warning:: + + For :class:`~sklearn.ensemble.GradientBoostingClassifier` and + :class:`~sklearn.ensemble.GradientBoostingRegressor`, the + `'recursion'` method (used by default) will not account for the `init` + predictor of the boosting process. 
In practice, this will produce + the same values as `'brute'` up to a constant offset in the target + response, provided that `init` is a constant estimator (which is the + default). However, if `init` is not a constant estimator, the + partial dependence values are incorrect for `'recursion'` because the + offset will be sample-dependent. It is preferable to use the `'brute'` + method. Note that this only applies to + :class:`~sklearn.ensemble.GradientBoostingClassifier` and + :class:`~sklearn.ensemble.GradientBoostingRegressor`, not to + :class:`~sklearn.ensemble.HistGradientBoostingClassifier` and + :class:`~sklearn.ensemble.HistGradientBoostingRegressor`. + + .. versionadded:: 1.0 + + Parameters + ---------- + estimator : BaseEstimator + A fitted estimator object implementing :term:`predict`, + :term:`predict_proba`, or :term:`decision_function`. + Multioutput-multiclass classifiers are not supported. + + X : {array-like, dataframe} of shape (n_samples, n_features) + ``X`` is used to generate a grid of values for the target + ``features`` (where the partial dependence will be evaluated), and + also to generate values for the complement features when the + `method` is `'brute'`. + + features : list of {int, str, pair of int, pair of str} + The target features for which to create the PDPs. + If `features[i]` is an integer or a string, a one-way PDP is created; + if `features[i]` is a tuple, a two-way PDP is created (only supported + with `kind='average'`). Each tuple must be of size 2. + If any entry is a string, then it must be in ``feature_names``. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights are used to calculate weighted means when averaging the + model output. If `None`, then samples are equally weighted. If + `sample_weight` is not `None`, then `method` will be set to `'brute'`. + Note that `sample_weight` is ignored for `kind='individual'`. + + .. versionadded:: 1.3 + + categorical_features : array-like of shape (n_features,) or shape \ + (n_categorical_features,), dtype={bool, int, str}, default=None + Indicates the categorical features. + + - `None`: no feature will be considered categorical; + - boolean array-like: boolean mask of shape `(n_features,)` + indicating which features are categorical. Thus, this array has + the same shape has `X.shape[1]`; + - integer or string array-like: integer indices or strings + indicating categorical features. + + .. versionadded:: 1.2 + + feature_names : array-like of shape (n_features,), dtype=str, default=None + Name of each feature; `feature_names[i]` holds the name of the feature + with index `i`. + By default, the name of the feature corresponds to their numerical + index for NumPy array and their column name for pandas dataframe. + + target : int, default=None + - In a multiclass setting, specifies the class for which the PDPs + should be computed. Note that for binary classification, the + positive class (index 1) is always used. + - In a multioutput setting, specifies the task for which the PDPs + should be computed. + + Ignored in binary classification or classical regression settings. + + response_method : {'auto', 'predict_proba', 'decision_function'}, \ + default='auto' + Specifies whether to use :term:`predict_proba` or + :term:`decision_function` as the target response. For regressors + this parameter is ignored and the response is always the output of + :term:`predict`. By default, :term:`predict_proba` is tried first + and we revert to :term:`decision_function` if it doesn't exist. 
If + ``method`` is `'recursion'`, the response is always the output of + :term:`decision_function`. + + n_cols : int, default=3 + The maximum number of columns in the grid plot. Only active when `ax` + is a single axis or `None`. + + grid_resolution : int, default=100 + The number of equally spaced points on the axes of the plots, for each + target feature. + + percentiles : tuple of float, default=(0.05, 0.95) + The lower and upper percentile used to create the extreme values + for the PDP axes. Must be in [0, 1]. + + method : str, default='auto' + The method used to calculate the averaged predictions: + + - `'recursion'` is only supported for some tree-based estimators + (namely + :class:`~sklearn.ensemble.GradientBoostingClassifier`, + :class:`~sklearn.ensemble.GradientBoostingRegressor`, + :class:`~sklearn.ensemble.HistGradientBoostingClassifier`, + :class:`~sklearn.ensemble.HistGradientBoostingRegressor`, + :class:`~sklearn.tree.DecisionTreeRegressor`, + :class:`~sklearn.ensemble.RandomForestRegressor` + but is more efficient in terms of speed. + With this method, the target response of a + classifier is always the decision function, not the predicted + probabilities. Since the `'recursion'` method implicitly computes + the average of the ICEs by design, it is not compatible with ICE and + thus `kind` must be `'average'`. + + - `'brute'` is supported for any estimator, but is more + computationally intensive. + + - `'auto'`: the `'recursion'` is used for estimators that support it, + and `'brute'` is used otherwise. If `sample_weight` is not `None`, + then `'brute'` is used regardless of the estimator. + + Please see :ref:`this note ` for + differences between the `'brute'` and `'recursion'` method. + + n_jobs : int, default=None + The number of CPUs to use to compute the partial dependences. + Computation is parallelized over features specified by the `features` + parameter. + + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + verbose : int, default=0 + Verbose output during PD computations. + + line_kw : dict, default=None + Dict with keywords passed to the ``matplotlib.pyplot.plot`` call. + For one-way partial dependence plots. It can be used to define common + properties for both `ice_lines_kw` and `pdp_line_kw`. + + ice_lines_kw : dict, default=None + Dictionary with keywords passed to the `matplotlib.pyplot.plot` call. + For ICE lines in the one-way partial dependence plots. + The key value pairs defined in `ice_lines_kw` takes priority over + `line_kw`. + + pd_line_kw : dict, default=None + Dictionary with keywords passed to the `matplotlib.pyplot.plot` call. + For partial dependence in one-way partial dependence plots. + The key value pairs defined in `pd_line_kw` takes priority over + `line_kw`. + + contour_kw : dict, default=None + Dict with keywords passed to the ``matplotlib.pyplot.contourf`` call. + For two-way partial dependence plots. + + ax : Matplotlib axes or array-like of Matplotlib axes, default=None + - If a single axis is passed in, it is treated as a bounding axes + and a grid of partial dependence plots will be drawn within + these bounds. The `n_cols` parameter controls the number of + columns in the grid. + - If an array-like of axes are passed in, the partial dependence + plots will be drawn directly into these axes. + - If `None`, a figure and a bounding axes is created and treated + as the single axes case. 
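# Editorial sketch (not vendored code): drawing into a pre-built array of axes,
# as described for the `ax` parameter above. With an array-like `ax`, the number
# of axes must match `len(features)` and `n_cols` is ignored.
import matplotlib.pyplot as plt
from sklearn.datasets import make_friedman1
from sklearn.ensemble import RandomForestRegressor
from sklearn.inspection import PartialDependenceDisplay

X, y = make_friedman1(random_state=0)
est = RandomForestRegressor(n_estimators=10, random_state=0).fit(X, y)
fig, axes = plt.subplots(1, 2, figsize=(8, 3))
PartialDependenceDisplay.from_estimator(est, X, [0, 1], ax=axes)
plt.close(fig)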
+ + kind : {'average', 'individual', 'both'}, default='average' + Whether to plot the partial dependence averaged across all the samples + in the dataset or one line per sample or both. + + - ``kind='average'`` results in the traditional PD plot; + - ``kind='individual'`` results in the ICE plot. + + Note that the fast `method='recursion'` option is only available for + `kind='average'` and `sample_weights=None`. Computing individual + dependencies and doing weighted averages requires using the slower + `method='brute'`. + + centered : bool, default=False + If `True`, the ICE and PD lines will start at the origin of the + y-axis. By default, no centering is done. + + .. versionadded:: 1.1 + + subsample : float, int or None, default=1000 + Sampling for ICE curves when `kind` is 'individual' or 'both'. + If `float`, should be between 0.0 and 1.0 and represent the proportion + of the dataset to be used to plot ICE curves. If `int`, represents the + absolute number samples to use. + + Note that the full dataset is still used to calculate averaged partial + dependence when `kind='both'`. + + random_state : int, RandomState instance or None, default=None + Controls the randomness of the selected samples when subsamples is not + `None` and `kind` is either `'both'` or `'individual'`. + See :term:`Glossary ` for details. + + Returns + ------- + display : :class:`~sklearn.inspection.PartialDependenceDisplay` + + See Also + -------- + partial_dependence : Compute Partial Dependence values. + + Examples + -------- + >>> import matplotlib.pyplot as plt + >>> from sklearn.datasets import make_friedman1 + >>> from sklearn.ensemble import GradientBoostingRegressor + >>> from sklearn.inspection import PartialDependenceDisplay + >>> X, y = make_friedman1() + >>> clf = GradientBoostingRegressor(n_estimators=10).fit(X, y) + >>> PartialDependenceDisplay.from_estimator(clf, X, [0, (0, 1)]) + <...> + >>> plt.show() + """ + check_matplotlib_support(f"{cls.__name__}.from_estimator") # noqa + import matplotlib.pyplot as plt # noqa + + # set target_idx for multi-class estimators + if hasattr(estimator, "classes_") and np.size(estimator.classes_) > 2: + if target is None: + raise ValueError("target must be specified for multi-class") + target_idx = np.searchsorted(estimator.classes_, target) + if ( + not (0 <= target_idx < len(estimator.classes_)) + or estimator.classes_[target_idx] != target + ): + raise ValueError("target not in est.classes_, got {}".format(target)) + else: + # regression and binary classification + target_idx = 0 + + # Use check_array only on lists and other non-array-likes / sparse. Do not + # convert DataFrame into a NumPy array. + if not (hasattr(X, "__array__") or sparse.issparse(X)): + X = check_array(X, force_all_finite="allow-nan", dtype=object) + n_features = X.shape[1] + + feature_names = _check_feature_names(X, feature_names) + # expand kind to always be a list of str + kind_ = [kind] * len(features) if isinstance(kind, str) else kind + if len(kind_) != len(features): + raise ValueError( + "When `kind` is provided as a list of strings, it should contain " + f"as many elements as `features`. `kind` contains {len(kind_)} " + f"element(s) and `features` contains {len(features)} element(s)." 
+ ) + + # convert features into a seq of int tuples + tmp_features, ice_for_two_way_pd = [], [] + for kind_plot, fxs in zip(kind_, features): + if isinstance(fxs, (numbers.Integral, str)): + fxs = (fxs,) + try: + fxs = tuple( + _get_feature_index(fx, feature_names=feature_names) for fx in fxs + ) + except TypeError as e: + raise ValueError( + "Each entry in features must be either an int, " + "a string, or an iterable of size at most 2." + ) from e + if not 1 <= np.size(fxs) <= 2: + raise ValueError( + "Each entry in features must be either an int, " + "a string, or an iterable of size at most 2." + ) + # store the information if 2-way PD was requested with ICE to later + # raise a ValueError with an exhaustive list of problematic + # settings. + ice_for_two_way_pd.append(kind_plot != "average" and np.size(fxs) > 1) + + tmp_features.append(fxs) + + if any(ice_for_two_way_pd): + # raise an error and be specific regarding the parameter values + # when 1- and 2-way PD were requested + kind_ = [ + "average" if forcing_average else kind_plot + for forcing_average, kind_plot in zip(ice_for_two_way_pd, kind_) + ] + raise ValueError( + "ICE plot cannot be rendered for 2-way feature interactions. " + "2-way feature interactions mandates PD plots using the " + "'average' kind: " + f"features={features!r} should be configured to use " + f"kind={kind_!r} explicitly." + ) + features = tmp_features + + if categorical_features is None: + is_categorical = [ + (False,) if len(fxs) == 1 else (False, False) for fxs in features + ] + else: + # we need to create a boolean indicator of which features are + # categorical from the categorical_features list. + categorical_features = np.asarray(categorical_features) + if categorical_features.dtype.kind == "b": + # categorical features provided as a list of boolean + if categorical_features.size != n_features: + raise ValueError( + "When `categorical_features` is a boolean array-like, " + "the array should be of shape (n_features,). Got " + f"{categorical_features.size} elements while `X` contains " + f"{n_features} features." + ) + is_categorical = [ + tuple(categorical_features[fx] for fx in fxs) for fxs in features + ] + elif categorical_features.dtype.kind in ("i", "O", "U"): + # categorical features provided as a list of indices or feature names + categorical_features_idx = [ + _get_feature_index(cat, feature_names=feature_names) + for cat in categorical_features + ] + is_categorical = [ + tuple([idx in categorical_features_idx for idx in fxs]) + for fxs in features + ] + else: + raise ValueError( + "Expected `categorical_features` to be an array-like of boolean," + f" integer, or string. Got {categorical_features.dtype} instead." + ) + + for cats in is_categorical: + if np.size(cats) == 2 and (cats[0] != cats[1]): + raise ValueError( + "Two-way partial dependence plots are not supported for pairs" + " of continuous and categorical features." + ) + + # collect the indices of the categorical features targeted by the partial + # dependence computation + categorical_features_targeted = set( + [ + fx + for fxs, cats in zip(features, is_categorical) + for fx in fxs + if any(cats) + ] + ) + if categorical_features_targeted: + min_n_cats = min( + [ + len(_unique(_safe_indexing(X, idx, axis=1))) + for idx in categorical_features_targeted + ] + ) + if grid_resolution < min_n_cats: + raise ValueError( + "The resolution of the computed grid is less than the " + "minimum number of categories in the targeted categorical " + "features. 
Expect the `grid_resolution` to be greater than " + f"{min_n_cats}. Got {grid_resolution} instead." + ) + + for is_cat, kind_plot in zip(is_categorical, kind_): + if any(is_cat) and kind_plot != "average": + raise ValueError( + "It is not possible to display individual effects for" + " categorical features." + ) + + # Early exit if the axes does not have the correct number of axes + if ax is not None and not isinstance(ax, plt.Axes): + axes = np.asarray(ax, dtype=object) + if axes.size != len(features): + raise ValueError( + "Expected ax to have {} axes, got {}".format( + len(features), axes.size + ) + ) + + for i in chain.from_iterable(features): + if i >= len(feature_names): + raise ValueError( + "All entries of features must be less than " + "len(feature_names) = {0}, got {1}.".format(len(feature_names), i) + ) + + if isinstance(subsample, numbers.Integral): + if subsample <= 0: + raise ValueError( + f"When an integer, subsample={subsample} should be positive." + ) + elif isinstance(subsample, numbers.Real): + if subsample <= 0 or subsample >= 1: + raise ValueError( + f"When a floating-point, subsample={subsample} should be in " + "the (0, 1) range." + ) + + # compute predictions and/or averaged predictions + pd_results = Parallel(n_jobs=n_jobs, verbose=verbose)( + delayed(partial_dependence)( + estimator, + X, + fxs, + sample_weight=sample_weight, + feature_names=feature_names, + categorical_features=categorical_features, + response_method=response_method, + method=method, + grid_resolution=grid_resolution, + percentiles=percentiles, + kind=kind_plot, + ) + for kind_plot, fxs in zip(kind_, features) + ) + + # For multioutput regression, we can only check the validity of target + # now that we have the predictions. + # Also note: as multiclass-multioutput classifiers are not supported, + # multiclass and multioutput scenario are mutually exclusive. So there is + # no risk of overwriting target_idx here. 
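# Editorial note (comments only, not vendored code): with `kind='average'`,
# `partial_dependence` returns `average` with shape
# (n_outputs, len(grid_values[0]), ...), and with `kind='individual'` it returns
# `individual` with shape (n_outputs, n_samples, len(grid_values[0]), ...).
# Indexing `.shape[0]` below therefore yields the number of outputs/tasks, which
# is what the multi-output regression check relies on.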
+ pd_result = pd_results[0] # checking the first result is enough + n_tasks = ( + pd_result.average.shape[0] + if kind_[0] == "average" + else pd_result.individual.shape[0] + ) + if is_regressor(estimator) and n_tasks > 1: + if target is None: + raise ValueError("target must be specified for multi-output regressors") + if not 0 <= target <= n_tasks: + raise ValueError( + "target must be in [0, n_tasks], got {}.".format(target) + ) + target_idx = target + + deciles = {} + for fxs, cats in zip(features, is_categorical): + for fx, cat in zip(fxs, cats): + if not cat and fx not in deciles: + X_col = _safe_indexing(X, fx, axis=1) + deciles[fx] = mquantiles(X_col, prob=np.arange(0.1, 1.0, 0.1)) + + display = cls( + pd_results=pd_results, + features=features, + feature_names=feature_names, + target_idx=target_idx, + deciles=deciles, + kind=kind, + subsample=subsample, + random_state=random_state, + is_categorical=is_categorical, + ) + return display.plot( + ax=ax, + n_cols=n_cols, + line_kw=line_kw, + ice_lines_kw=ice_lines_kw, + pd_line_kw=pd_line_kw, + contour_kw=contour_kw, + centered=centered, + ) + + def _get_sample_count(self, n_samples): + """Compute the number of samples as an integer.""" + if isinstance(self.subsample, numbers.Integral): + if self.subsample < n_samples: + return self.subsample + return n_samples + elif isinstance(self.subsample, numbers.Real): + return ceil(n_samples * self.subsample) + return n_samples + + def _plot_ice_lines( + self, + preds, + feature_values, + n_ice_to_plot, + ax, + pd_plot_idx, + n_total_lines_by_plot, + individual_line_kw, + ): + """Plot the ICE lines. + + Parameters + ---------- + preds : ndarray of shape \ + (n_instances, n_grid_points) + The predictions computed for all points of `feature_values` for a + given feature for all samples in `X`. + feature_values : ndarray of shape (n_grid_points,) + The feature values for which the predictions have been computed. + n_ice_to_plot : int + The number of ICE lines to plot. + ax : Matplotlib axes + The axis on which to plot the ICE lines. + pd_plot_idx : int + The sequential index of the plot. It will be unraveled to find the + matching 2D position in the grid layout. + n_total_lines_by_plot : int + The total number of lines expected to be plot on the axis. + individual_line_kw : dict + Dict with keywords passed when plotting the ICE lines. + """ + rng = check_random_state(self.random_state) + # subsample ice + ice_lines_idx = rng.choice( + preds.shape[0], + n_ice_to_plot, + replace=False, + ) + ice_lines_subsampled = preds[ice_lines_idx, :] + # plot the subsampled ice + for ice_idx, ice in enumerate(ice_lines_subsampled): + line_idx = np.unravel_index( + pd_plot_idx * n_total_lines_by_plot + ice_idx, self.lines_.shape + ) + self.lines_[line_idx] = ax.plot( + feature_values, ice.ravel(), **individual_line_kw + )[0] + + def _plot_average_dependence( + self, + avg_preds, + feature_values, + ax, + pd_line_idx, + line_kw, + categorical, + bar_kw, + ): + """Plot the average partial dependence. + + Parameters + ---------- + avg_preds : ndarray of shape (n_grid_points,) + The average predictions for all points of `feature_values` for a + given feature for all samples in `X`. + feature_values : ndarray of shape (n_grid_points,) + The feature values for which the predictions have been computed. + ax : Matplotlib axes + The axis on which to plot the average PD. + pd_line_idx : int + The sequential index of the plot. It will be unraveled to find the + matching 2D position in the grid layout. 
+ line_kw : dict + Dict with keywords passed when plotting the PD plot. + categorical : bool + Whether feature is categorical. + bar_kw: dict + Dict with keywords passed when plotting the PD bars (categorical). + """ + if categorical: + bar_idx = np.unravel_index(pd_line_idx, self.bars_.shape) + self.bars_[bar_idx] = ax.bar(feature_values, avg_preds, **bar_kw)[0] + ax.tick_params(axis="x", rotation=90) + else: + line_idx = np.unravel_index(pd_line_idx, self.lines_.shape) + self.lines_[line_idx] = ax.plot( + feature_values, + avg_preds, + **line_kw, + )[0] + + def _plot_one_way_partial_dependence( + self, + kind, + preds, + avg_preds, + feature_values, + feature_idx, + n_ice_lines, + ax, + n_cols, + pd_plot_idx, + n_lines, + ice_lines_kw, + pd_line_kw, + categorical, + bar_kw, + pdp_lim, + ): + """Plot 1-way partial dependence: ICE and PDP. + + Parameters + ---------- + kind : str + The kind of partial plot to draw. + preds : ndarray of shape \ + (n_instances, n_grid_points) or None + The predictions computed for all points of `feature_values` for a + given feature for all samples in `X`. + avg_preds : ndarray of shape (n_grid_points,) + The average predictions for all points of `feature_values` for a + given feature for all samples in `X`. + feature_values : ndarray of shape (n_grid_points,) + The feature values for which the predictions have been computed. + feature_idx : int + The index corresponding to the target feature. + n_ice_lines : int + The number of ICE lines to plot. + ax : Matplotlib axes + The axis on which to plot the ICE and PDP lines. + n_cols : int or None + The number of column in the axis. + pd_plot_idx : int + The sequential index of the plot. It will be unraveled to find the + matching 2D position in the grid layout. + n_lines : int + The total number of lines expected to be plot on the axis. + ice_lines_kw : dict + Dict with keywords passed when plotting the ICE lines. + pd_line_kw : dict + Dict with keywords passed when plotting the PD plot. + categorical : bool + Whether feature is categorical. + bar_kw: dict + Dict with keywords passed when plotting the PD bars (categorical). + pdp_lim : dict + Global min and max average predictions, such that all plots will + have the same scale and y limits. `pdp_lim[1]` is the global min + and max for single partial dependence curves. 
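# Editorial illustration (not vendored code): `pdp_lim` maps the number of
# target features in a plot to a shared (min, max) pair, e.g.
#     pdp_lim = {1: (-1.38, 0.66), 2: (-1.38, 0.66)}
# so one-way panels (key 1) share y-limits and two-way panels (key 2) share
# contour levels.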
+ """ + from matplotlib import transforms # noqa + + if kind in ("individual", "both"): + self._plot_ice_lines( + preds[self.target_idx], + feature_values, + n_ice_lines, + ax, + pd_plot_idx, + n_lines, + ice_lines_kw, + ) + + if kind in ("average", "both"): + # the average is stored as the last line + if kind == "average": + pd_line_idx = pd_plot_idx + else: + pd_line_idx = pd_plot_idx * n_lines + n_ice_lines + self._plot_average_dependence( + avg_preds[self.target_idx].ravel(), + feature_values, + ax, + pd_line_idx, + pd_line_kw, + categorical, + bar_kw, + ) + + trans = transforms.blended_transform_factory(ax.transData, ax.transAxes) + # create the decile line for the vertical axis + vlines_idx = np.unravel_index(pd_plot_idx, self.deciles_vlines_.shape) + if self.deciles.get(feature_idx[0], None) is not None: + self.deciles_vlines_[vlines_idx] = ax.vlines( + self.deciles[feature_idx[0]], + 0, + 0.05, + transform=trans, + color="k", + ) + # reset ylim which was overwritten by vlines + min_val = min(val[0] for val in pdp_lim.values()) + max_val = max(val[1] for val in pdp_lim.values()) + ax.set_ylim([min_val, max_val]) + + # Set xlabel if it is not already set + if not ax.get_xlabel(): + ax.set_xlabel(self.feature_names[feature_idx[0]]) + + if n_cols is None or pd_plot_idx % n_cols == 0: + if not ax.get_ylabel(): + ax.set_ylabel("Partial dependence") + else: + ax.set_yticklabels([]) + + if pd_line_kw.get("label", None) and kind != "individual" and not categorical: + ax.legend() + + def _plot_two_way_partial_dependence( + self, + avg_preds, + feature_values, + feature_idx, + ax, + pd_plot_idx, + Z_level, + contour_kw, + categorical, + heatmap_kw, + ): + """Plot 2-way partial dependence. + + Parameters + ---------- + avg_preds : ndarray of shape \ + (n_instances, n_grid_points, n_grid_points) + The average predictions for all points of `feature_values[0]` and + `feature_values[1]` for some given features for all samples in `X`. + feature_values : seq of 1d array + A sequence of array of the feature values for which the predictions + have been computed. + feature_idx : tuple of int + The indices of the target features + ax : Matplotlib axes + The axis on which to plot the ICE and PDP lines. + pd_plot_idx : int + The sequential index of the plot. It will be unraveled to find the + matching 2D position in the grid layout. + Z_level : ndarray of shape (8, 8) + The Z-level used to encode the average predictions. + contour_kw : dict + Dict with keywords passed when plotting the contours. + categorical : bool + Whether features are categorical. + heatmap_kw: dict + Dict with keywords passed when plotting the PD heatmap + (categorical). 
+ """ + if categorical: + import matplotlib.pyplot as plt + + default_im_kw = dict(interpolation="nearest", cmap="viridis") + im_kw = {**default_im_kw, **heatmap_kw} + + data = avg_preds[self.target_idx] + im = ax.imshow(data, **im_kw) + text = None + cmap_min, cmap_max = im.cmap(0), im.cmap(1.0) + + text = np.empty_like(data, dtype=object) + # print text with appropriate color depending on background + thresh = (data.max() + data.min()) / 2.0 + + for flat_index in range(data.size): + row, col = np.unravel_index(flat_index, data.shape) + color = cmap_max if data[row, col] < thresh else cmap_min + + values_format = ".2f" + text_data = format(data[row, col], values_format) + + text_kwargs = dict(ha="center", va="center", color=color) + text[row, col] = ax.text(col, row, text_data, **text_kwargs) + + fig = ax.figure + fig.colorbar(im, ax=ax) + ax.set( + xticks=np.arange(len(feature_values[1])), + yticks=np.arange(len(feature_values[0])), + xticklabels=feature_values[1], + yticklabels=feature_values[0], + xlabel=self.feature_names[feature_idx[1]], + ylabel=self.feature_names[feature_idx[0]], + ) + + plt.setp(ax.get_xticklabels(), rotation="vertical") + + heatmap_idx = np.unravel_index(pd_plot_idx, self.heatmaps_.shape) + self.heatmaps_[heatmap_idx] = im + else: + from matplotlib import transforms # noqa + + XX, YY = np.meshgrid(feature_values[0], feature_values[1]) + Z = avg_preds[self.target_idx].T + CS = ax.contour(XX, YY, Z, levels=Z_level, linewidths=0.5, colors="k") + contour_idx = np.unravel_index(pd_plot_idx, self.contours_.shape) + self.contours_[contour_idx] = ax.contourf( + XX, + YY, + Z, + levels=Z_level, + vmax=Z_level[-1], + vmin=Z_level[0], + **contour_kw, + ) + ax.clabel(CS, fmt="%2.2f", colors="k", fontsize=10, inline=True) + + trans = transforms.blended_transform_factory(ax.transData, ax.transAxes) + # create the decile line for the vertical axis + xlim, ylim = ax.get_xlim(), ax.get_ylim() + vlines_idx = np.unravel_index(pd_plot_idx, self.deciles_vlines_.shape) + self.deciles_vlines_[vlines_idx] = ax.vlines( + self.deciles[feature_idx[0]], + 0, + 0.05, + transform=trans, + color="k", + ) + # create the decile line for the horizontal axis + hlines_idx = np.unravel_index(pd_plot_idx, self.deciles_hlines_.shape) + self.deciles_hlines_[hlines_idx] = ax.hlines( + self.deciles[feature_idx[1]], + 0, + 0.05, + transform=trans, + color="k", + ) + # reset xlim and ylim since they are overwritten by hlines and + # vlines + ax.set_xlim(xlim) + ax.set_ylim(ylim) + + # set xlabel if it is not already set + if not ax.get_xlabel(): + ax.set_xlabel(self.feature_names[feature_idx[0]]) + ax.set_ylabel(self.feature_names[feature_idx[1]]) + + def plot( + self, + *, + ax=None, + n_cols=3, + line_kw=None, + ice_lines_kw=None, + pd_line_kw=None, + contour_kw=None, + bar_kw=None, + heatmap_kw=None, + pdp_lim=None, + centered=False, + ): + """Plot partial dependence plots. + + Parameters + ---------- + ax : Matplotlib axes or array-like of Matplotlib axes, default=None + - If a single axis is passed in, it is treated as a bounding axes + and a grid of partial dependence plots will be drawn within + these bounds. The `n_cols` parameter controls the number of + columns in the grid. + - If an array-like of axes are passed in, the partial dependence + plots will be drawn directly into these axes. + - If `None`, a figure and a bounding axes is created and treated + as the single axes case. + + n_cols : int, default=3 + The maximum number of columns in the grid plot. 
Only active when + `ax` is a single axes or `None`. + + line_kw : dict, default=None + Dict with keywords passed to the `matplotlib.pyplot.plot` call. + For one-way partial dependence plots. + + ice_lines_kw : dict, default=None + Dictionary with keywords passed to the `matplotlib.pyplot.plot` call. + For ICE lines in the one-way partial dependence plots. + The key value pairs defined in `ice_lines_kw` takes priority over + `line_kw`. + + .. versionadded:: 1.0 + + pd_line_kw : dict, default=None + Dictionary with keywords passed to the `matplotlib.pyplot.plot` call. + For partial dependence in one-way partial dependence plots. + The key value pairs defined in `pd_line_kw` takes priority over + `line_kw`. + + .. versionadded:: 1.0 + + contour_kw : dict, default=None + Dict with keywords passed to the `matplotlib.pyplot.contourf` + call for two-way partial dependence plots. + + bar_kw : dict, default=None + Dict with keywords passed to the `matplotlib.pyplot.bar` + call for one-way categorical partial dependence plots. + + .. versionadded:: 1.2 + + heatmap_kw : dict, default=None + Dict with keywords passed to the `matplotlib.pyplot.imshow` + call for two-way categorical partial dependence plots. + + .. versionadded:: 1.2 + + pdp_lim : dict, default=None + Global min and max average predictions, such that all plots will have the + same scale and y limits. `pdp_lim[1]` is the global min and max for single + partial dependence curves. `pdp_lim[2]` is the global min and max for + two-way partial dependence curves. If `None` (default), the limit will be + inferred from the global minimum and maximum of all predictions. + + .. versionadded:: 1.1 + + centered : bool, default=False + If `True`, the ICE and PD lines will start at the origin of the + y-axis. By default, no centering is done. + + .. versionadded:: 1.1 + + Returns + ------- + display : :class:`~sklearn.inspection.PartialDependenceDisplay` + Returns a :class:`~sklearn.inspection.PartialDependenceDisplay` + object that contains the partial dependence plots. + """ + + check_matplotlib_support("plot_partial_dependence") + import matplotlib.pyplot as plt # noqa + from matplotlib.gridspec import GridSpecFromSubplotSpec # noqa + + if isinstance(self.kind, str): + kind = [self.kind] * len(self.features) + else: + kind = self.kind + + if self.is_categorical is None: + is_categorical = [ + (False,) if len(fx) == 1 else (False, False) for fx in self.features + ] + else: + is_categorical = self.is_categorical + + if len(kind) != len(self.features): + raise ValueError( + "When `kind` is provided as a list of strings, it should " + "contain as many elements as `features`. `kind` contains " + f"{len(kind)} element(s) and `features` contains " + f"{len(self.features)} element(s)." + ) + + valid_kinds = {"average", "individual", "both"} + if any([k not in valid_kinds for k in kind]): + raise ValueError( + f"Values provided to `kind` must be one of: {valid_kinds!r} or a list" + f" of such values. 
Currently, kind={self.kind!r}" + ) + + # Center results before plotting + if not centered: + pd_results_ = self.pd_results + else: + pd_results_ = [] + for kind_plot, pd_result in zip(kind, self.pd_results): + current_results = {"grid_values": pd_result["grid_values"]} + + if kind_plot in ("individual", "both"): + preds = pd_result.individual + preds = preds - preds[self.target_idx, :, 0, None] + current_results["individual"] = preds + + if kind_plot in ("average", "both"): + avg_preds = pd_result.average + avg_preds = avg_preds - avg_preds[self.target_idx, 0, None] + current_results["average"] = avg_preds + + pd_results_.append(Bunch(**current_results)) + + if pdp_lim is None: + # get global min and max average predictions of PD grouped by plot type + pdp_lim = {} + for kind_plot, pdp in zip(kind, pd_results_): + values = pdp["grid_values"] + preds = pdp.average if kind_plot == "average" else pdp.individual + min_pd = preds[self.target_idx].min() + max_pd = preds[self.target_idx].max() + + # expand the limits to account so that the plotted lines do not touch + # the edges of the plot + span = max_pd - min_pd + min_pd -= 0.05 * span + max_pd += 0.05 * span + + n_fx = len(values) + old_min_pd, old_max_pd = pdp_lim.get(n_fx, (min_pd, max_pd)) + min_pd = min(min_pd, old_min_pd) + max_pd = max(max_pd, old_max_pd) + pdp_lim[n_fx] = (min_pd, max_pd) + + if line_kw is None: + line_kw = {} + if ice_lines_kw is None: + ice_lines_kw = {} + if pd_line_kw is None: + pd_line_kw = {} + if bar_kw is None: + bar_kw = {} + if heatmap_kw is None: + heatmap_kw = {} + + if ax is None: + _, ax = plt.subplots() + + if contour_kw is None: + contour_kw = {} + default_contour_kws = {"alpha": 0.75} + contour_kw = {**default_contour_kws, **contour_kw} + + n_features = len(self.features) + is_average_plot = [kind_plot == "average" for kind_plot in kind] + if all(is_average_plot): + # only average plots are requested + n_ice_lines = 0 + n_lines = 1 + else: + # we need to determine the number of ICE samples computed + ice_plot_idx = is_average_plot.index(False) + n_ice_lines = self._get_sample_count( + len(pd_results_[ice_plot_idx].individual[0]) + ) + if any([kind_plot == "both" for kind_plot in kind]): + n_lines = n_ice_lines + 1 # account for the average line + else: + n_lines = n_ice_lines + + if isinstance(ax, plt.Axes): + # If ax was set off, it has most likely been set to off + # by a previous call to plot. 
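# Editorial comment (not vendored code): this guard is why the same bounding
# axes cannot be reused across calls. The supported pattern, per the
# `from_estimator` docstring above, is to hand the first display's axes to the
# second call, roughly:
#     disp1 = PartialDependenceDisplay.from_estimator(est1, X, [1, 2])
#     disp2 = PartialDependenceDisplay.from_estimator(est2, X, [1, 2],
#                                                     ax=disp1.axes_)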
+ if not ax.axison: + raise ValueError( + "The ax was already used in another plot " + "function, please set ax=display.axes_ " + "instead" + ) + + ax.set_axis_off() + self.bounding_ax_ = ax + self.figure_ = ax.figure + + n_cols = min(n_cols, n_features) + n_rows = int(np.ceil(n_features / float(n_cols))) + + self.axes_ = np.empty((n_rows, n_cols), dtype=object) + if all(is_average_plot): + self.lines_ = np.empty((n_rows, n_cols), dtype=object) + else: + self.lines_ = np.empty((n_rows, n_cols, n_lines), dtype=object) + self.contours_ = np.empty((n_rows, n_cols), dtype=object) + self.bars_ = np.empty((n_rows, n_cols), dtype=object) + self.heatmaps_ = np.empty((n_rows, n_cols), dtype=object) + + axes_ravel = self.axes_.ravel() + + gs = GridSpecFromSubplotSpec( + n_rows, n_cols, subplot_spec=ax.get_subplotspec() + ) + for i, spec in zip(range(n_features), gs): + axes_ravel[i] = self.figure_.add_subplot(spec) + + else: # array-like + ax = np.asarray(ax, dtype=object) + if ax.size != n_features: + raise ValueError( + "Expected ax to have {} axes, got {}".format(n_features, ax.size) + ) + + if ax.ndim == 2: + n_cols = ax.shape[1] + else: + n_cols = None + + self.bounding_ax_ = None + self.figure_ = ax.ravel()[0].figure + self.axes_ = ax + if all(is_average_plot): + self.lines_ = np.empty_like(ax, dtype=object) + else: + self.lines_ = np.empty(ax.shape + (n_lines,), dtype=object) + self.contours_ = np.empty_like(ax, dtype=object) + self.bars_ = np.empty_like(ax, dtype=object) + self.heatmaps_ = np.empty_like(ax, dtype=object) + + # create contour levels for two-way plots + if 2 in pdp_lim: + Z_level = np.linspace(*pdp_lim[2], num=8) + + self.deciles_vlines_ = np.empty_like(self.axes_, dtype=object) + self.deciles_hlines_ = np.empty_like(self.axes_, dtype=object) + + for pd_plot_idx, (axi, feature_idx, cat, pd_result, kind_plot) in enumerate( + zip( + self.axes_.ravel(), + self.features, + is_categorical, + pd_results_, + kind, + ) + ): + avg_preds = None + preds = None + feature_values = pd_result["grid_values"] + if kind_plot == "individual": + preds = pd_result.individual + elif kind_plot == "average": + avg_preds = pd_result.average + else: # kind_plot == 'both' + avg_preds = pd_result.average + preds = pd_result.individual + + if len(feature_values) == 1: + # define the line-style for the current plot + default_line_kws = { + "color": "C0", + "label": "average" if kind_plot == "both" else None, + } + if kind_plot == "individual": + default_ice_lines_kws = {"alpha": 0.3, "linewidth": 0.5} + default_pd_lines_kws = {} + elif kind_plot == "both": + # by default, we need to distinguish the average line from + # the individual lines via color and line style + default_ice_lines_kws = { + "alpha": 0.3, + "linewidth": 0.5, + "color": "tab:blue", + } + default_pd_lines_kws = { + "color": "tab:orange", + "linestyle": "--", + } + else: + default_ice_lines_kws = {} + default_pd_lines_kws = {} + + ice_lines_kw = { + **default_line_kws, + **default_ice_lines_kws, + **line_kw, + **ice_lines_kw, + } + del ice_lines_kw["label"] + + pd_line_kw = { + **default_line_kws, + **default_pd_lines_kws, + **line_kw, + **pd_line_kw, + } + + default_bar_kws = {"color": "C0"} + bar_kw = {**default_bar_kws, **bar_kw} + + default_heatmap_kw = {} + heatmap_kw = {**default_heatmap_kw, **heatmap_kw} + + self._plot_one_way_partial_dependence( + kind_plot, + preds, + avg_preds, + feature_values[0], + feature_idx, + n_ice_lines, + axi, + n_cols, + pd_plot_idx, + n_lines, + ice_lines_kw, + pd_line_kw, + cat[0], + bar_kw, + pdp_lim, 
+ ) + else: + self._plot_two_way_partial_dependence( + avg_preds, + feature_values, + feature_idx, + axi, + pd_plot_idx, + Z_level, + contour_kw, + cat[0] and cat[1], + heatmap_kw, + ) + + return self diff --git a/venv/lib/python3.10/site-packages/sklearn/inspection/_plot/tests/test_boundary_decision_display.py b/venv/lib/python3.10/site-packages/sklearn/inspection/_plot/tests/test_boundary_decision_display.py new file mode 100644 index 0000000000000000000000000000000000000000..7bb38f55445a08a28c1415db8a8b02cd2bd4c2dd --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/inspection/_plot/tests/test_boundary_decision_display.py @@ -0,0 +1,609 @@ +import warnings + +import numpy as np +import pytest + +from sklearn.base import BaseEstimator, ClassifierMixin +from sklearn.datasets import ( + load_diabetes, + load_iris, + make_classification, + make_multilabel_classification, +) +from sklearn.ensemble import IsolationForest +from sklearn.inspection import DecisionBoundaryDisplay +from sklearn.inspection._plot.decision_boundary import _check_boundary_response_method +from sklearn.linear_model import LogisticRegression +from sklearn.preprocessing import scale +from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor +from sklearn.utils._testing import ( + assert_allclose, + assert_array_equal, +) + +# TODO: Remove when https://github.com/numpy/numpy/issues/14397 is resolved +pytestmark = pytest.mark.filterwarnings( + "ignore:In future, it will be an error for 'np.bool_':DeprecationWarning:" + "matplotlib.*" +) + + +X, y = make_classification( + n_informative=1, + n_redundant=1, + n_clusters_per_class=1, + n_features=2, + random_state=42, +) + + +def load_iris_2d_scaled(): + X, y = load_iris(return_X_y=True) + X = scale(X)[:, :2] + return X, y + + +@pytest.fixture(scope="module") +def fitted_clf(): + return LogisticRegression().fit(X, y) + + +def test_input_data_dimension(pyplot): + """Check that we raise an error when `X` does not have exactly 2 features.""" + X, y = make_classification(n_samples=10, n_features=4, random_state=0) + + clf = LogisticRegression().fit(X, y) + msg = "n_features must be equal to 2. Got 4 instead." + with pytest.raises(ValueError, match=msg): + DecisionBoundaryDisplay.from_estimator(estimator=clf, X=X) + + +def test_check_boundary_response_method_error(): + """Check that we raise an error for the cases not supported by + `_check_boundary_response_method`. 
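# Editorial sketch (not part of the vendored test module): typical use of the
# public API these tests exercise, mirroring the `from_estimator` docstring.
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.inspection import DecisionBoundaryDisplay

iris = load_iris()
X_2d = iris.data[:, :2]
clf = LogisticRegression(max_iter=1000).fit(X_2d, iris.target)
disp = DecisionBoundaryDisplay.from_estimator(
    clf, X_2d, response_method="predict", grid_resolution=50, alpha=0.5
)
disp.ax_.scatter(X_2d[:, 0], X_2d[:, 1], c=iris.target, edgecolor="k")
plt.close("all")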
+ """ + + class MultiLabelClassifier: + classes_ = [np.array([0, 1]), np.array([0, 1])] + + err_msg = "Multi-label and multi-output multi-class classifiers are not supported" + with pytest.raises(ValueError, match=err_msg): + _check_boundary_response_method(MultiLabelClassifier(), "predict", None) + + class MulticlassClassifier: + classes_ = [0, 1, 2] + + err_msg = "Multiclass classifiers are only supported when `response_method` is" + for response_method in ("predict_proba", "decision_function"): + with pytest.raises(ValueError, match=err_msg): + _check_boundary_response_method( + MulticlassClassifier(), response_method, None + ) + + +@pytest.mark.parametrize( + "estimator, response_method, class_of_interest, expected_prediction_method", + [ + (DecisionTreeRegressor(), "predict", None, "predict"), + (DecisionTreeRegressor(), "auto", None, "predict"), + (LogisticRegression().fit(*load_iris_2d_scaled()), "predict", None, "predict"), + (LogisticRegression().fit(*load_iris_2d_scaled()), "auto", None, "predict"), + ( + LogisticRegression().fit(*load_iris_2d_scaled()), + "predict_proba", + 0, + "predict_proba", + ), + ( + LogisticRegression().fit(*load_iris_2d_scaled()), + "decision_function", + 0, + "decision_function", + ), + ( + LogisticRegression().fit(X, y), + "auto", + None, + ["decision_function", "predict_proba", "predict"], + ), + (LogisticRegression().fit(X, y), "predict", None, "predict"), + ( + LogisticRegression().fit(X, y), + ["predict_proba", "decision_function"], + None, + ["predict_proba", "decision_function"], + ), + ], +) +def test_check_boundary_response_method( + estimator, response_method, class_of_interest, expected_prediction_method +): + """Check the behaviour of `_check_boundary_response_method` for the supported + cases. + """ + prediction_method = _check_boundary_response_method( + estimator, response_method, class_of_interest + ) + assert prediction_method == expected_prediction_method + + +@pytest.mark.parametrize("response_method", ["predict_proba", "decision_function"]) +def test_multiclass_error(pyplot, response_method): + """Check multiclass errors.""" + X, y = make_classification(n_classes=3, n_informative=3, random_state=0) + X = X[:, [0, 1]] + lr = LogisticRegression().fit(X, y) + + msg = ( + "Multiclass classifiers are only supported when `response_method` is 'predict'" + " or 'auto'" + ) + with pytest.raises(ValueError, match=msg): + DecisionBoundaryDisplay.from_estimator(lr, X, response_method=response_method) + + +@pytest.mark.parametrize("response_method", ["auto", "predict"]) +def test_multiclass(pyplot, response_method): + """Check multiclass gives expected results.""" + grid_resolution = 10 + eps = 1.0 + X, y = make_classification(n_classes=3, n_informative=3, random_state=0) + X = X[:, [0, 1]] + lr = LogisticRegression(random_state=0).fit(X, y) + + disp = DecisionBoundaryDisplay.from_estimator( + lr, X, response_method=response_method, grid_resolution=grid_resolution, eps=1.0 + ) + + x0_min, x0_max = X[:, 0].min() - eps, X[:, 0].max() + eps + x1_min, x1_max = X[:, 1].min() - eps, X[:, 1].max() + eps + xx0, xx1 = np.meshgrid( + np.linspace(x0_min, x0_max, grid_resolution), + np.linspace(x1_min, x1_max, grid_resolution), + ) + response = lr.predict(np.c_[xx0.ravel(), xx1.ravel()]) + assert_allclose(disp.response, response.reshape(xx0.shape)) + assert_allclose(disp.xx0, xx0) + assert_allclose(disp.xx1, xx1) + + +@pytest.mark.parametrize( + "kwargs, error_msg", + [ + ( + {"plot_method": "hello_world"}, + r"plot_method must be one of contourf, contour, 
pcolormesh. Got hello_world" + r" instead.", + ), + ( + {"grid_resolution": 1}, + r"grid_resolution must be greater than 1. Got 1 instead", + ), + ( + {"grid_resolution": -1}, + r"grid_resolution must be greater than 1. Got -1 instead", + ), + ({"eps": -1.1}, r"eps must be greater than or equal to 0. Got -1.1 instead"), + ], +) +def test_input_validation_errors(pyplot, kwargs, error_msg, fitted_clf): + """Check input validation from_estimator.""" + with pytest.raises(ValueError, match=error_msg): + DecisionBoundaryDisplay.from_estimator(fitted_clf, X, **kwargs) + + +def test_display_plot_input_error(pyplot, fitted_clf): + """Check input validation for `plot`.""" + disp = DecisionBoundaryDisplay.from_estimator(fitted_clf, X, grid_resolution=5) + + with pytest.raises(ValueError, match="plot_method must be 'contourf'"): + disp.plot(plot_method="hello_world") + + +@pytest.mark.parametrize( + "response_method", ["auto", "predict", "predict_proba", "decision_function"] +) +@pytest.mark.parametrize("plot_method", ["contourf", "contour"]) +def test_decision_boundary_display_classifier( + pyplot, fitted_clf, response_method, plot_method +): + """Check that decision boundary is correct.""" + fig, ax = pyplot.subplots() + eps = 2.0 + disp = DecisionBoundaryDisplay.from_estimator( + fitted_clf, + X, + grid_resolution=5, + response_method=response_method, + plot_method=plot_method, + eps=eps, + ax=ax, + ) + assert isinstance(disp.surface_, pyplot.matplotlib.contour.QuadContourSet) + assert disp.ax_ == ax + assert disp.figure_ == fig + + x0, x1 = X[:, 0], X[:, 1] + + x0_min, x0_max = x0.min() - eps, x0.max() + eps + x1_min, x1_max = x1.min() - eps, x1.max() + eps + + assert disp.xx0.min() == pytest.approx(x0_min) + assert disp.xx0.max() == pytest.approx(x0_max) + assert disp.xx1.min() == pytest.approx(x1_min) + assert disp.xx1.max() == pytest.approx(x1_max) + + fig2, ax2 = pyplot.subplots() + # change plotting method for second plot + disp.plot(plot_method="pcolormesh", ax=ax2, shading="auto") + assert isinstance(disp.surface_, pyplot.matplotlib.collections.QuadMesh) + assert disp.ax_ == ax2 + assert disp.figure_ == fig2 + + +@pytest.mark.parametrize("response_method", ["auto", "predict", "decision_function"]) +@pytest.mark.parametrize("plot_method", ["contourf", "contour"]) +def test_decision_boundary_display_outlier_detector( + pyplot, response_method, plot_method +): + """Check that decision boundary is correct for outlier detector.""" + fig, ax = pyplot.subplots() + eps = 2.0 + outlier_detector = IsolationForest(random_state=0).fit(X, y) + disp = DecisionBoundaryDisplay.from_estimator( + outlier_detector, + X, + grid_resolution=5, + response_method=response_method, + plot_method=plot_method, + eps=eps, + ax=ax, + ) + assert isinstance(disp.surface_, pyplot.matplotlib.contour.QuadContourSet) + assert disp.ax_ == ax + assert disp.figure_ == fig + + x0, x1 = X[:, 0], X[:, 1] + + x0_min, x0_max = x0.min() - eps, x0.max() + eps + x1_min, x1_max = x1.min() - eps, x1.max() + eps + + assert disp.xx0.min() == pytest.approx(x0_min) + assert disp.xx0.max() == pytest.approx(x0_max) + assert disp.xx1.min() == pytest.approx(x1_min) + assert disp.xx1.max() == pytest.approx(x1_max) + + +@pytest.mark.parametrize("response_method", ["auto", "predict"]) +@pytest.mark.parametrize("plot_method", ["contourf", "contour"]) +def test_decision_boundary_display_regressor(pyplot, response_method, plot_method): + """Check that we can display the decision boundary for a regressor.""" + X, y = load_diabetes(return_X_y=True) + X = 
X[:, :2] + tree = DecisionTreeRegressor().fit(X, y) + fig, ax = pyplot.subplots() + eps = 2.0 + disp = DecisionBoundaryDisplay.from_estimator( + tree, + X, + response_method=response_method, + ax=ax, + eps=eps, + plot_method=plot_method, + ) + assert isinstance(disp.surface_, pyplot.matplotlib.contour.QuadContourSet) + assert disp.ax_ == ax + assert disp.figure_ == fig + + x0, x1 = X[:, 0], X[:, 1] + + x0_min, x0_max = x0.min() - eps, x0.max() + eps + x1_min, x1_max = x1.min() - eps, x1.max() + eps + + assert disp.xx0.min() == pytest.approx(x0_min) + assert disp.xx0.max() == pytest.approx(x0_max) + assert disp.xx1.min() == pytest.approx(x1_min) + assert disp.xx1.max() == pytest.approx(x1_max) + + fig2, ax2 = pyplot.subplots() + # change plotting method for second plot + disp.plot(plot_method="pcolormesh", ax=ax2, shading="auto") + assert isinstance(disp.surface_, pyplot.matplotlib.collections.QuadMesh) + assert disp.ax_ == ax2 + assert disp.figure_ == fig2 + + +@pytest.mark.parametrize( + "response_method, msg", + [ + ( + "predict_proba", + "MyClassifier has none of the following attributes: predict_proba", + ), + ( + "decision_function", + "MyClassifier has none of the following attributes: decision_function", + ), + ( + "auto", + ( + "MyClassifier has none of the following attributes: decision_function, " + "predict_proba, predict" + ), + ), + ( + "bad_method", + "MyClassifier has none of the following attributes: bad_method", + ), + ], +) +def test_error_bad_response(pyplot, response_method, msg): + """Check errors for bad response.""" + + class MyClassifier(BaseEstimator, ClassifierMixin): + def fit(self, X, y): + self.fitted_ = True + self.classes_ = [0, 1] + return self + + clf = MyClassifier().fit(X, y) + + with pytest.raises(AttributeError, match=msg): + DecisionBoundaryDisplay.from_estimator(clf, X, response_method=response_method) + + +@pytest.mark.parametrize("response_method", ["auto", "predict", "predict_proba"]) +def test_multilabel_classifier_error(pyplot, response_method): + """Check that multilabel classifier raises correct error.""" + X, y = make_multilabel_classification(random_state=0) + X = X[:, :2] + tree = DecisionTreeClassifier().fit(X, y) + + msg = "Multi-label and multi-output multi-class classifiers are not supported" + with pytest.raises(ValueError, match=msg): + DecisionBoundaryDisplay.from_estimator( + tree, + X, + response_method=response_method, + ) + + +@pytest.mark.parametrize("response_method", ["auto", "predict", "predict_proba"]) +def test_multi_output_multi_class_classifier_error(pyplot, response_method): + """Check that multi-output multi-class classifier raises correct error.""" + X = np.asarray([[0, 1], [1, 2]]) + y = np.asarray([["tree", "cat"], ["cat", "tree"]]) + tree = DecisionTreeClassifier().fit(X, y) + + msg = "Multi-label and multi-output multi-class classifiers are not supported" + with pytest.raises(ValueError, match=msg): + DecisionBoundaryDisplay.from_estimator( + tree, + X, + response_method=response_method, + ) + + +def test_multioutput_regressor_error(pyplot): + """Check that multioutput regressor raises correct error.""" + X = np.asarray([[0, 1], [1, 2]]) + y = np.asarray([[0, 1], [4, 1]]) + tree = DecisionTreeRegressor().fit(X, y) + with pytest.raises(ValueError, match="Multi-output regressors are not supported"): + DecisionBoundaryDisplay.from_estimator(tree, X, response_method="predict") + + +@pytest.mark.parametrize( + "response_method", + ["predict_proba", "decision_function", ["predict_proba", "predict"]], +) +def 
test_regressor_unsupported_response(pyplot, response_method): + """Check that an error is raised when a classifier-only response_method is requested for a regressor.""" + X, y = load_diabetes(return_X_y=True) + X = X[:, :2] + tree = DecisionTreeRegressor().fit(X, y) + err_msg = "should either be a classifier to be used with response_method" + with pytest.raises(ValueError, match=err_msg): + DecisionBoundaryDisplay.from_estimator(tree, X, response_method=response_method) + + +@pytest.mark.filterwarnings( + # We expect to raise the following warning because the classifier is fit on a + # NumPy array + "ignore:X has feature names, but LogisticRegression was fitted without" +) +def test_dataframe_labels_used(pyplot, fitted_clf): + """Check that column names are used for pandas.""" + pd = pytest.importorskip("pandas") + df = pd.DataFrame(X, columns=["col_x", "col_y"]) + + # pandas column names are used by default + _, ax = pyplot.subplots() + disp = DecisionBoundaryDisplay.from_estimator(fitted_clf, df, ax=ax) + assert ax.get_xlabel() == "col_x" + assert ax.get_ylabel() == "col_y" + + # second call to plot will have the names + fig, ax = pyplot.subplots() + disp.plot(ax=ax) + assert ax.get_xlabel() == "col_x" + assert ax.get_ylabel() == "col_y" + + # axes with a label will not get overridden + fig, ax = pyplot.subplots() + ax.set(xlabel="hello", ylabel="world") + disp.plot(ax=ax) + assert ax.get_xlabel() == "hello" + assert ax.get_ylabel() == "world" + + # labels get overridden only if provided to the `plot` method + disp.plot(ax=ax, xlabel="overwritten_x", ylabel="overwritten_y") + assert ax.get_xlabel() == "overwritten_x" + assert ax.get_ylabel() == "overwritten_y" + + # labels do not get inferred if provided to `from_estimator` + _, ax = pyplot.subplots() + disp = DecisionBoundaryDisplay.from_estimator( + fitted_clf, df, ax=ax, xlabel="overwritten_x", ylabel="overwritten_y" + ) + assert ax.get_xlabel() == "overwritten_x" + assert ax.get_ylabel() == "overwritten_y" + + +def test_string_target(pyplot): + """Check that decision boundary works with classifiers trained on string labels.""" + iris = load_iris() + X = iris.data[:, [0, 1]] + + # Use strings as target + y = iris.target_names[iris.target] + log_reg = LogisticRegression().fit(X, y) + + # Does not raise + DecisionBoundaryDisplay.from_estimator( + log_reg, + X, + grid_resolution=5, + response_method="predict", + ) + + +def test_dataframe_support(pyplot): + """Check that passing a dataframe at fit and to the Display does not + raise warnings. + + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/23311 + """ + pd = pytest.importorskip("pandas") + df = pd.DataFrame(X, columns=["col_x", "col_y"]) + estimator = LogisticRegression().fit(df, y) + + with warnings.catch_warnings(): + # no warnings linked to feature names validation should be raised + warnings.simplefilter("error", UserWarning) + DecisionBoundaryDisplay.from_estimator(estimator, df, response_method="predict") + + +@pytest.mark.parametrize("response_method", ["predict_proba", "decision_function"]) +def test_class_of_interest_binary(pyplot, response_method): + """Check the behaviour of passing `class_of_interest` for plotting the output of + `predict_proba` and `decision_function` in the binary case. 
+ """ + iris = load_iris() + X = iris.data[:100, :2] + y = iris.target[:100] + assert_array_equal(np.unique(y), [0, 1]) + + estimator = LogisticRegression().fit(X, y) + # We will check that `class_of_interest=None` is equivalent to + # `class_of_interest=estimator.classes_[1]` + disp_default = DecisionBoundaryDisplay.from_estimator( + estimator, + X, + response_method=response_method, + class_of_interest=None, + ) + disp_class_1 = DecisionBoundaryDisplay.from_estimator( + estimator, + X, + response_method=response_method, + class_of_interest=estimator.classes_[1], + ) + + assert_allclose(disp_default.response, disp_class_1.response) + + # we can check that `_get_response_values` modifies the response when targeting + # the other class, i.e. 1 - p(y=1|x) for `predict_proba` and -decision_function + # for `decision_function`. + disp_class_0 = DecisionBoundaryDisplay.from_estimator( + estimator, + X, + response_method=response_method, + class_of_interest=estimator.classes_[0], + ) + + if response_method == "predict_proba": + assert_allclose(disp_default.response, 1 - disp_class_0.response) + else: + assert response_method == "decision_function" + assert_allclose(disp_default.response, -disp_class_0.response) + + +@pytest.mark.parametrize("response_method", ["predict_proba", "decision_function"]) +def test_class_of_interest_multiclass(pyplot, response_method): + """Check the behaviour of passing `class_of_interest` for plotting the output of + `predict_proba` and `decision_function` in the multiclass case. + """ + iris = load_iris() + X = iris.data[:, :2] + y = iris.target # the target are numerical labels + class_of_interest_idx = 2 + + estimator = LogisticRegression().fit(X, y) + disp = DecisionBoundaryDisplay.from_estimator( + estimator, + X, + response_method=response_method, + class_of_interest=class_of_interest_idx, + ) + + # we will check that we plot the expected values as response + grid = np.concatenate([disp.xx0.reshape(-1, 1), disp.xx1.reshape(-1, 1)], axis=1) + response = getattr(estimator, response_method)(grid)[:, class_of_interest_idx] + assert_allclose(response.reshape(*disp.response.shape), disp.response) + + # make the same test but this time using target as strings + y = iris.target_names[iris.target] + estimator = LogisticRegression().fit(X, y) + + disp = DecisionBoundaryDisplay.from_estimator( + estimator, + X, + response_method=response_method, + class_of_interest=iris.target_names[class_of_interest_idx], + ) + + grid = np.concatenate([disp.xx0.reshape(-1, 1), disp.xx1.reshape(-1, 1)], axis=1) + response = getattr(estimator, response_method)(grid)[:, class_of_interest_idx] + assert_allclose(response.reshape(*disp.response.shape), disp.response) + + # check that we raise an error for unknown labels + # this test should already be handled in `_get_response_values` but we can have this + # test here as well + err_msg = "class_of_interest=2 is not a valid label: It should be one of" + with pytest.raises(ValueError, match=err_msg): + DecisionBoundaryDisplay.from_estimator( + estimator, + X, + response_method=response_method, + class_of_interest=class_of_interest_idx, + ) + + # TODO: remove this test when we handle multiclass with class_of_interest=None + # by showing the max of the decision function or the max of the predicted + # probabilities. 
+ err_msg = "Multiclass classifiers are only supported" + with pytest.raises(ValueError, match=err_msg): + DecisionBoundaryDisplay.from_estimator( + estimator, + X, + response_method=response_method, + class_of_interest=None, + ) + + +def test_subclass_named_constructors_return_type_is_subclass(pyplot): + """Check that named constructors return the correct type when subclassed. + + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/pull/27675 + """ + clf = LogisticRegression().fit(X, y) + + class SubclassOfDisplay(DecisionBoundaryDisplay): + pass + + curve = SubclassOfDisplay.from_estimator(estimator=clf, X=X) + + assert isinstance(curve, SubclassOfDisplay) diff --git a/venv/lib/python3.10/site-packages/sklearn/inspection/_plot/tests/test_plot_partial_dependence.py b/venv/lib/python3.10/site-packages/sklearn/inspection/_plot/tests/test_plot_partial_dependence.py new file mode 100644 index 0000000000000000000000000000000000000000..57fc68d07e887fa18b8c84c08780a77e6712d843 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/inspection/_plot/tests/test_plot_partial_dependence.py @@ -0,0 +1,1140 @@ +import numpy as np +import pytest +from numpy.testing import assert_allclose +from scipy.stats.mstats import mquantiles + +from sklearn.compose import make_column_transformer +from sklearn.datasets import ( + load_diabetes, + load_iris, + make_classification, + make_regression, +) +from sklearn.ensemble import GradientBoostingClassifier, GradientBoostingRegressor +from sklearn.inspection import PartialDependenceDisplay +from sklearn.linear_model import LinearRegression +from sklearn.pipeline import make_pipeline +from sklearn.preprocessing import OneHotEncoder +from sklearn.utils._testing import _convert_container + +# TODO: Remove when https://github.com/numpy/numpy/issues/14397 is resolved +pytestmark = pytest.mark.filterwarnings( + ( + "ignore:In future, it will be an error for 'np.bool_':DeprecationWarning:" + "matplotlib.*" + ), +) + + +@pytest.fixture(scope="module") +def diabetes(): + # diabetes dataset, subsampled for speed + data = load_diabetes() + data.data = data.data[:50] + data.target = data.target[:50] + return data + + +@pytest.fixture(scope="module") +def clf_diabetes(diabetes): + clf = GradientBoostingRegressor(n_estimators=10, random_state=1) + clf.fit(diabetes.data, diabetes.target) + return clf + + +@pytest.mark.filterwarnings("ignore:A Bunch will be returned") +@pytest.mark.parametrize("grid_resolution", [10, 20]) +def test_plot_partial_dependence(grid_resolution, pyplot, clf_diabetes, diabetes): + # Test partial dependence plot function. 
+ # Use columns 0 & 2 as 1 is not quantitative (sex) + feature_names = diabetes.feature_names + disp = PartialDependenceDisplay.from_estimator( + clf_diabetes, + diabetes.data, + [0, 2, (0, 2)], + grid_resolution=grid_resolution, + feature_names=feature_names, + contour_kw={"cmap": "jet"}, + ) + fig = pyplot.gcf() + axs = fig.get_axes() + assert disp.figure_ is fig + assert len(axs) == 4 + + assert disp.bounding_ax_ is not None + assert disp.axes_.shape == (1, 3) + assert disp.lines_.shape == (1, 3) + assert disp.contours_.shape == (1, 3) + assert disp.deciles_vlines_.shape == (1, 3) + assert disp.deciles_hlines_.shape == (1, 3) + + assert disp.lines_[0, 2] is None + assert disp.contours_[0, 0] is None + assert disp.contours_[0, 1] is None + + # deciles lines: always show on xaxis, only show on yaxis if 2-way PDP + for i in range(3): + assert disp.deciles_vlines_[0, i] is not None + assert disp.deciles_hlines_[0, 0] is None + assert disp.deciles_hlines_[0, 1] is None + assert disp.deciles_hlines_[0, 2] is not None + + assert disp.features == [(0,), (2,), (0, 2)] + assert np.all(disp.feature_names == feature_names) + assert len(disp.deciles) == 2 + for i in [0, 2]: + assert_allclose( + disp.deciles[i], + mquantiles(diabetes.data[:, i], prob=np.arange(0.1, 1.0, 0.1)), + ) + + single_feature_positions = [(0, (0, 0)), (2, (0, 1))] + expected_ylabels = ["Partial dependence", ""] + + for i, (feat_col, pos) in enumerate(single_feature_positions): + ax = disp.axes_[pos] + assert ax.get_ylabel() == expected_ylabels[i] + assert ax.get_xlabel() == diabetes.feature_names[feat_col] + + line = disp.lines_[pos] + + avg_preds = disp.pd_results[i] + assert avg_preds.average.shape == (1, grid_resolution) + target_idx = disp.target_idx + + line_data = line.get_data() + assert_allclose(line_data[0], avg_preds["grid_values"][0]) + assert_allclose(line_data[1], avg_preds.average[target_idx].ravel()) + + # two feature position + ax = disp.axes_[0, 2] + coutour = disp.contours_[0, 2] + assert coutour.get_cmap().name == "jet" + assert ax.get_xlabel() == diabetes.feature_names[0] + assert ax.get_ylabel() == diabetes.feature_names[2] + + +@pytest.mark.filterwarnings("ignore:A Bunch will be returned") +@pytest.mark.parametrize( + "kind, centered, subsample, shape", + [ + ("average", False, None, (1, 3)), + ("individual", False, None, (1, 3, 50)), + ("both", False, None, (1, 3, 51)), + ("individual", False, 20, (1, 3, 20)), + ("both", False, 20, (1, 3, 21)), + ("individual", False, 0.5, (1, 3, 25)), + ("both", False, 0.5, (1, 3, 26)), + ("average", True, None, (1, 3)), + ("individual", True, None, (1, 3, 50)), + ("both", True, None, (1, 3, 51)), + ("individual", True, 20, (1, 3, 20)), + ("both", True, 20, (1, 3, 21)), + ], +) +def test_plot_partial_dependence_kind( + pyplot, + kind, + centered, + subsample, + shape, + clf_diabetes, + diabetes, +): + disp = PartialDependenceDisplay.from_estimator( + clf_diabetes, + diabetes.data, + [0, 1, 2], + kind=kind, + centered=centered, + subsample=subsample, + ) + + assert disp.axes_.shape == (1, 3) + assert disp.lines_.shape == shape + assert disp.contours_.shape == (1, 3) + + assert disp.contours_[0, 0] is None + assert disp.contours_[0, 1] is None + assert disp.contours_[0, 2] is None + + if centered: + assert all([ln._y[0] == 0.0 for ln in disp.lines_.ravel() if ln is not None]) + else: + assert all([ln._y[0] != 0.0 for ln in disp.lines_.ravel() if ln is not None]) + + +@pytest.mark.filterwarnings("ignore:A Bunch will be returned") +@pytest.mark.parametrize( + "input_type, 
feature_names_type", + [ + ("dataframe", None), + ("dataframe", "list"), + ("list", "list"), + ("array", "list"), + ("dataframe", "array"), + ("list", "array"), + ("array", "array"), + ("dataframe", "series"), + ("list", "series"), + ("array", "series"), + ("dataframe", "index"), + ("list", "index"), + ("array", "index"), + ], +) +def test_plot_partial_dependence_str_features( + pyplot, + clf_diabetes, + diabetes, + input_type, + feature_names_type, +): + if input_type == "dataframe": + pd = pytest.importorskip("pandas") + X = pd.DataFrame(diabetes.data, columns=diabetes.feature_names) + elif input_type == "list": + X = diabetes.data.tolist() + else: + X = diabetes.data + + if feature_names_type is None: + feature_names = None + else: + feature_names = _convert_container(diabetes.feature_names, feature_names_type) + + grid_resolution = 25 + # check with str features and array feature names and single column + disp = PartialDependenceDisplay.from_estimator( + clf_diabetes, + X, + [("age", "bmi"), "bmi"], + grid_resolution=grid_resolution, + feature_names=feature_names, + n_cols=1, + line_kw={"alpha": 0.8}, + ) + fig = pyplot.gcf() + axs = fig.get_axes() + assert len(axs) == 3 + + assert disp.figure_ is fig + assert disp.axes_.shape == (2, 1) + assert disp.lines_.shape == (2, 1) + assert disp.contours_.shape == (2, 1) + assert disp.deciles_vlines_.shape == (2, 1) + assert disp.deciles_hlines_.shape == (2, 1) + + assert disp.lines_[0, 0] is None + assert disp.deciles_vlines_[0, 0] is not None + assert disp.deciles_hlines_[0, 0] is not None + assert disp.contours_[1, 0] is None + assert disp.deciles_hlines_[1, 0] is None + assert disp.deciles_vlines_[1, 0] is not None + + # line + ax = disp.axes_[1, 0] + assert ax.get_xlabel() == "bmi" + assert ax.get_ylabel() == "Partial dependence" + + line = disp.lines_[1, 0] + avg_preds = disp.pd_results[1] + target_idx = disp.target_idx + assert line.get_alpha() == 0.8 + + line_data = line.get_data() + assert_allclose(line_data[0], avg_preds["grid_values"][0]) + assert_allclose(line_data[1], avg_preds.average[target_idx].ravel()) + + # contour + ax = disp.axes_[0, 0] + assert ax.get_xlabel() == "age" + assert ax.get_ylabel() == "bmi" + + +@pytest.mark.filterwarnings("ignore:A Bunch will be returned") +def test_plot_partial_dependence_custom_axes(pyplot, clf_diabetes, diabetes): + grid_resolution = 25 + fig, (ax1, ax2) = pyplot.subplots(1, 2) + disp = PartialDependenceDisplay.from_estimator( + clf_diabetes, + diabetes.data, + ["age", ("age", "bmi")], + grid_resolution=grid_resolution, + feature_names=diabetes.feature_names, + ax=[ax1, ax2], + ) + assert fig is disp.figure_ + assert disp.bounding_ax_ is None + assert disp.axes_.shape == (2,) + assert disp.axes_[0] is ax1 + assert disp.axes_[1] is ax2 + + ax = disp.axes_[0] + assert ax.get_xlabel() == "age" + assert ax.get_ylabel() == "Partial dependence" + + line = disp.lines_[0] + avg_preds = disp.pd_results[0] + target_idx = disp.target_idx + + line_data = line.get_data() + assert_allclose(line_data[0], avg_preds["grid_values"][0]) + assert_allclose(line_data[1], avg_preds.average[target_idx].ravel()) + + # contour + ax = disp.axes_[1] + assert ax.get_xlabel() == "age" + assert ax.get_ylabel() == "bmi" + + +@pytest.mark.filterwarnings("ignore:A Bunch will be returned") +@pytest.mark.parametrize( + "kind, lines", [("average", 1), ("individual", 50), ("both", 51)] +) +def test_plot_partial_dependence_passing_numpy_axes( + pyplot, clf_diabetes, diabetes, kind, lines +): + grid_resolution = 25 + feature_names 
= diabetes.feature_names + disp1 = PartialDependenceDisplay.from_estimator( + clf_diabetes, + diabetes.data, + ["age", "bmi"], + kind=kind, + grid_resolution=grid_resolution, + feature_names=feature_names, + ) + assert disp1.axes_.shape == (1, 2) + assert disp1.axes_[0, 0].get_ylabel() == "Partial dependence" + assert disp1.axes_[0, 1].get_ylabel() == "" + assert len(disp1.axes_[0, 0].get_lines()) == lines + assert len(disp1.axes_[0, 1].get_lines()) == lines + + lr = LinearRegression() + lr.fit(diabetes.data, diabetes.target) + + disp2 = PartialDependenceDisplay.from_estimator( + lr, + diabetes.data, + ["age", "bmi"], + kind=kind, + grid_resolution=grid_resolution, + feature_names=feature_names, + ax=disp1.axes_, + ) + + assert np.all(disp1.axes_ == disp2.axes_) + assert len(disp2.axes_[0, 0].get_lines()) == 2 * lines + assert len(disp2.axes_[0, 1].get_lines()) == 2 * lines + + +@pytest.mark.filterwarnings("ignore:A Bunch will be returned") +@pytest.mark.parametrize("nrows, ncols", [(2, 2), (3, 1)]) +def test_plot_partial_dependence_incorrent_num_axes( + pyplot, clf_diabetes, diabetes, nrows, ncols +): + grid_resolution = 5 + fig, axes = pyplot.subplots(nrows, ncols) + axes_formats = [list(axes.ravel()), tuple(axes.ravel()), axes] + + msg = "Expected ax to have 2 axes, got {}".format(nrows * ncols) + + disp = PartialDependenceDisplay.from_estimator( + clf_diabetes, + diabetes.data, + ["age", "bmi"], + grid_resolution=grid_resolution, + feature_names=diabetes.feature_names, + ) + + for ax_format in axes_formats: + with pytest.raises(ValueError, match=msg): + PartialDependenceDisplay.from_estimator( + clf_diabetes, + diabetes.data, + ["age", "bmi"], + grid_resolution=grid_resolution, + feature_names=diabetes.feature_names, + ax=ax_format, + ) + + # with axes object + with pytest.raises(ValueError, match=msg): + disp.plot(ax=ax_format) + + +@pytest.mark.filterwarnings("ignore:A Bunch will be returned") +def test_plot_partial_dependence_with_same_axes(pyplot, clf_diabetes, diabetes): + # The first call to plot_partial_dependence will create two new axes to + # place in the space of the passed in axes, which results in a total of + # three axes in the figure. + # Currently the API does not allow for the second call to + # plot_partial_dependence to use the same axes again, because it will + # create two new axes in the space resulting in five axes. To get the + # expected behavior one needs to pass the generated axes into the second + # call: + # disp1 = plot_partial_dependence(...) 
+ # disp2 = plot_partial_dependence(..., ax=disp1.axes_) + + grid_resolution = 25 + fig, ax = pyplot.subplots() + PartialDependenceDisplay.from_estimator( + clf_diabetes, + diabetes.data, + ["age", "bmi"], + grid_resolution=grid_resolution, + feature_names=diabetes.feature_names, + ax=ax, + ) + + msg = ( + "The ax was already used in another plot function, please set " + "ax=display.axes_ instead" + ) + + with pytest.raises(ValueError, match=msg): + PartialDependenceDisplay.from_estimator( + clf_diabetes, + diabetes.data, + ["age", "bmi"], + grid_resolution=grid_resolution, + feature_names=diabetes.feature_names, + ax=ax, + ) + + +@pytest.mark.filterwarnings("ignore:A Bunch will be returned") +def test_plot_partial_dependence_feature_name_reuse(pyplot, clf_diabetes, diabetes): + # second call to plot does not change the feature names from the first + # call + + feature_names = diabetes.feature_names + disp = PartialDependenceDisplay.from_estimator( + clf_diabetes, + diabetes.data, + [0, 1], + grid_resolution=10, + feature_names=feature_names, + ) + + PartialDependenceDisplay.from_estimator( + clf_diabetes, diabetes.data, [0, 1], grid_resolution=10, ax=disp.axes_ + ) + + for i, ax in enumerate(disp.axes_.ravel()): + assert ax.get_xlabel() == feature_names[i] + + +@pytest.mark.filterwarnings("ignore:A Bunch will be returned") +def test_plot_partial_dependence_multiclass(pyplot): + grid_resolution = 25 + clf_int = GradientBoostingClassifier(n_estimators=10, random_state=1) + iris = load_iris() + + # Test partial dependence plot function on multi-class input. + clf_int.fit(iris.data, iris.target) + disp_target_0 = PartialDependenceDisplay.from_estimator( + clf_int, iris.data, [0, 3], target=0, grid_resolution=grid_resolution + ) + assert disp_target_0.figure_ is pyplot.gcf() + assert disp_target_0.axes_.shape == (1, 2) + assert disp_target_0.lines_.shape == (1, 2) + assert disp_target_0.contours_.shape == (1, 2) + assert disp_target_0.deciles_vlines_.shape == (1, 2) + assert disp_target_0.deciles_hlines_.shape == (1, 2) + assert all(c is None for c in disp_target_0.contours_.flat) + assert disp_target_0.target_idx == 0 + + # now with symbol labels + target = iris.target_names[iris.target] + clf_symbol = GradientBoostingClassifier(n_estimators=10, random_state=1) + clf_symbol.fit(iris.data, target) + disp_symbol = PartialDependenceDisplay.from_estimator( + clf_symbol, iris.data, [0, 3], target="setosa", grid_resolution=grid_resolution + ) + assert disp_symbol.figure_ is pyplot.gcf() + assert disp_symbol.axes_.shape == (1, 2) + assert disp_symbol.lines_.shape == (1, 2) + assert disp_symbol.contours_.shape == (1, 2) + assert disp_symbol.deciles_vlines_.shape == (1, 2) + assert disp_symbol.deciles_hlines_.shape == (1, 2) + assert all(c is None for c in disp_symbol.contours_.flat) + assert disp_symbol.target_idx == 0 + + for int_result, symbol_result in zip( + disp_target_0.pd_results, disp_symbol.pd_results + ): + assert_allclose(int_result.average, symbol_result.average) + assert_allclose(int_result["grid_values"], symbol_result["grid_values"]) + + # check that the pd plots are different for another target + disp_target_1 = PartialDependenceDisplay.from_estimator( + clf_int, iris.data, [0, 3], target=1, grid_resolution=grid_resolution + ) + target_0_data_y = disp_target_0.lines_[0, 0].get_data()[1] + target_1_data_y = disp_target_1.lines_[0, 0].get_data()[1] + assert any(target_0_data_y != target_1_data_y) + + +multioutput_regression_data = make_regression(n_samples=50, n_targets=2, 
random_state=0) + + +@pytest.mark.filterwarnings("ignore:A Bunch will be returned") +@pytest.mark.parametrize("target", [0, 1]) +def test_plot_partial_dependence_multioutput(pyplot, target): + # Test partial dependence plot function on multi-output input. + X, y = multioutput_regression_data + clf = LinearRegression().fit(X, y) + + grid_resolution = 25 + disp = PartialDependenceDisplay.from_estimator( + clf, X, [0, 1], target=target, grid_resolution=grid_resolution + ) + fig = pyplot.gcf() + axs = fig.get_axes() + assert len(axs) == 3 + assert disp.target_idx == target + assert disp.bounding_ax_ is not None + + positions = [(0, 0), (0, 1)] + expected_label = ["Partial dependence", ""] + + for i, pos in enumerate(positions): + ax = disp.axes_[pos] + assert ax.get_ylabel() == expected_label[i] + assert ax.get_xlabel() == f"x{i}" + + +@pytest.mark.filterwarnings("ignore:A Bunch will be returned") +def test_plot_partial_dependence_dataframe(pyplot, clf_diabetes, diabetes): + pd = pytest.importorskip("pandas") + df = pd.DataFrame(diabetes.data, columns=diabetes.feature_names) + + grid_resolution = 25 + + PartialDependenceDisplay.from_estimator( + clf_diabetes, + df, + ["bp", "s1"], + grid_resolution=grid_resolution, + feature_names=df.columns.tolist(), + ) + + +dummy_classification_data = make_classification(random_state=0) + + +@pytest.mark.filterwarnings("ignore:A Bunch will be returned") +@pytest.mark.parametrize( + "data, params, err_msg", + [ + ( + multioutput_regression_data, + {"target": None, "features": [0]}, + "target must be specified for multi-output", + ), + ( + multioutput_regression_data, + {"target": -1, "features": [0]}, + r"target must be in \[0, n_tasks\]", + ), + ( + multioutput_regression_data, + {"target": 100, "features": [0]}, + r"target must be in \[0, n_tasks\]", + ), + ( + dummy_classification_data, + {"features": ["foobar"], "feature_names": None}, + "Feature 'foobar' not in feature_names", + ), + ( + dummy_classification_data, + {"features": ["foobar"], "feature_names": ["abcd", "def"]}, + "Feature 'foobar' not in feature_names", + ), + ( + dummy_classification_data, + {"features": [(1, 2, 3)]}, + "Each entry in features must be either an int, ", + ), + ( + dummy_classification_data, + {"features": [1, {}]}, + "Each entry in features must be either an int, ", + ), + ( + dummy_classification_data, + {"features": [tuple()]}, + "Each entry in features must be either an int, ", + ), + ( + dummy_classification_data, + {"features": [123], "feature_names": ["blahblah"]}, + "All entries of features must be less than ", + ), + ( + dummy_classification_data, + {"features": [0, 1, 2], "feature_names": ["a", "b", "a"]}, + "feature_names should not contain duplicates", + ), + ( + dummy_classification_data, + {"features": [1, 2], "kind": ["both"]}, + "When `kind` is provided as a list of strings, it should contain", + ), + ( + dummy_classification_data, + {"features": [1], "subsample": -1}, + "When an integer, subsample=-1 should be positive.", + ), + ( + dummy_classification_data, + {"features": [1], "subsample": 1.2}, + r"When a floating-point, subsample=1.2 should be in the \(0, 1\) range", + ), + ( + dummy_classification_data, + {"features": [1, 2], "categorical_features": [1.0, 2.0]}, + "Expected `categorical_features` to be an array-like of boolean,", + ), + ( + dummy_classification_data, + {"features": [(1, 2)], "categorical_features": [2]}, + "Two-way partial dependence plots are not supported for pairs", + ), + ( + dummy_classification_data, + {"features": [1], 
"categorical_features": [1], "kind": "individual"}, + "It is not possible to display individual effects", + ), + ], +) +def test_plot_partial_dependence_error(pyplot, data, params, err_msg): + X, y = data + estimator = LinearRegression().fit(X, y) + + with pytest.raises(ValueError, match=err_msg): + PartialDependenceDisplay.from_estimator(estimator, X, **params) + + +@pytest.mark.filterwarnings("ignore:A Bunch will be returned") +@pytest.mark.parametrize( + "params, err_msg", + [ + ({"target": 4, "features": [0]}, "target not in est.classes_, got 4"), + ({"target": None, "features": [0]}, "target must be specified for multi-class"), + ( + {"target": 1, "features": [4.5]}, + "Each entry in features must be either an int,", + ), + ], +) +def test_plot_partial_dependence_multiclass_error(pyplot, params, err_msg): + iris = load_iris() + clf = GradientBoostingClassifier(n_estimators=10, random_state=1) + clf.fit(iris.data, iris.target) + + with pytest.raises(ValueError, match=err_msg): + PartialDependenceDisplay.from_estimator(clf, iris.data, **params) + + +def test_plot_partial_dependence_does_not_override_ylabel( + pyplot, clf_diabetes, diabetes +): + # Non-regression test to be sure to not override the ylabel if it has been + # See https://github.com/scikit-learn/scikit-learn/issues/15772 + _, axes = pyplot.subplots(1, 2) + axes[0].set_ylabel("Hello world") + PartialDependenceDisplay.from_estimator( + clf_diabetes, diabetes.data, [0, 1], ax=axes + ) + + assert axes[0].get_ylabel() == "Hello world" + assert axes[1].get_ylabel() == "Partial dependence" + + +@pytest.mark.parametrize( + "categorical_features, array_type", + [ + (["col_A", "col_C"], "dataframe"), + ([0, 2], "array"), + ([True, False, True], "array"), + ], +) +def test_plot_partial_dependence_with_categorical( + pyplot, categorical_features, array_type +): + X = [[1, 1, "A"], [2, 0, "C"], [3, 2, "B"]] + column_name = ["col_A", "col_B", "col_C"] + X = _convert_container(X, array_type, columns_name=column_name) + y = np.array([1.2, 0.5, 0.45]).T + + preprocessor = make_column_transformer((OneHotEncoder(), categorical_features)) + model = make_pipeline(preprocessor, LinearRegression()) + model.fit(X, y) + + # single feature + disp = PartialDependenceDisplay.from_estimator( + model, + X, + features=["col_C"], + feature_names=column_name, + categorical_features=categorical_features, + ) + + assert disp.figure_ is pyplot.gcf() + assert disp.bars_.shape == (1, 1) + assert disp.bars_[0][0] is not None + assert disp.lines_.shape == (1, 1) + assert disp.lines_[0][0] is None + assert disp.contours_.shape == (1, 1) + assert disp.contours_[0][0] is None + assert disp.deciles_vlines_.shape == (1, 1) + assert disp.deciles_vlines_[0][0] is None + assert disp.deciles_hlines_.shape == (1, 1) + assert disp.deciles_hlines_[0][0] is None + assert disp.axes_[0, 0].get_legend() is None + + # interaction between two features + disp = PartialDependenceDisplay.from_estimator( + model, + X, + features=[("col_A", "col_C")], + feature_names=column_name, + categorical_features=categorical_features, + ) + + assert disp.figure_ is pyplot.gcf() + assert disp.bars_.shape == (1, 1) + assert disp.bars_[0][0] is None + assert disp.lines_.shape == (1, 1) + assert disp.lines_[0][0] is None + assert disp.contours_.shape == (1, 1) + assert disp.contours_[0][0] is None + assert disp.deciles_vlines_.shape == (1, 1) + assert disp.deciles_vlines_[0][0] is None + assert disp.deciles_hlines_.shape == (1, 1) + assert disp.deciles_hlines_[0][0] is None + assert disp.axes_[0, 
0].get_legend() is None + + +def test_plot_partial_dependence_legend(pyplot): + pd = pytest.importorskip("pandas") + X = pd.DataFrame( + { + "col_A": ["A", "B", "C"], + "col_B": [1, 0, 2], + "col_C": ["C", "B", "A"], + } + ) + y = np.array([1.2, 0.5, 0.45]).T + + categorical_features = ["col_A", "col_C"] + preprocessor = make_column_transformer((OneHotEncoder(), categorical_features)) + model = make_pipeline(preprocessor, LinearRegression()) + model.fit(X, y) + + disp = PartialDependenceDisplay.from_estimator( + model, + X, + features=["col_B", "col_C"], + categorical_features=categorical_features, + kind=["both", "average"], + ) + + legend_text = disp.axes_[0, 0].get_legend().get_texts() + assert len(legend_text) == 1 + assert legend_text[0].get_text() == "average" + assert disp.axes_[0, 1].get_legend() is None + + +@pytest.mark.parametrize( + "kind, expected_shape", + [("average", (1, 2)), ("individual", (1, 2, 20)), ("both", (1, 2, 21))], +) +def test_plot_partial_dependence_subsampling( + pyplot, clf_diabetes, diabetes, kind, expected_shape +): + # check that the subsampling is properly working + # non-regression test for: + # https://github.com/scikit-learn/scikit-learn/pull/18359 + matplotlib = pytest.importorskip("matplotlib") + grid_resolution = 25 + feature_names = diabetes.feature_names + + disp1 = PartialDependenceDisplay.from_estimator( + clf_diabetes, + diabetes.data, + ["age", "bmi"], + kind=kind, + grid_resolution=grid_resolution, + feature_names=feature_names, + subsample=20, + random_state=0, + ) + + assert disp1.lines_.shape == expected_shape + assert all( + [isinstance(line, matplotlib.lines.Line2D) for line in disp1.lines_.ravel()] + ) + + +@pytest.mark.parametrize( + "kind, line_kw, label", + [ + ("individual", {}, None), + ("individual", {"label": "xxx"}, None), + ("average", {}, None), + ("average", {"label": "xxx"}, "xxx"), + ("both", {}, "average"), + ("both", {"label": "xxx"}, "xxx"), + ], +) +def test_partial_dependence_overwrite_labels( + pyplot, + clf_diabetes, + diabetes, + kind, + line_kw, + label, +): + """Check that we can overwrite the label of the PDP plot.""" + disp = PartialDependenceDisplay.from_estimator( + clf_diabetes, + diabetes.data, + [0, 2], + grid_resolution=25, + feature_names=diabetes.feature_names, + kind=kind, + line_kw=line_kw, + ) + + for ax in disp.axes_.ravel(): + if label is None: + assert ax.get_legend() is None + else: + legend_text = ax.get_legend().get_texts() + assert len(legend_text) == 1 + assert legend_text[0].get_text() == label + + +@pytest.mark.parametrize( + "categorical_features, array_type", + [ + (["col_A", "col_C"], "dataframe"), + ([0, 2], "array"), + ([True, False, True], "array"), + ], +) +def test_grid_resolution_with_categorical(pyplot, categorical_features, array_type): + """Check that we raise a ValueError when the grid_resolution is too small + with respect to the number of categories in the categorical features targeted. 
+ """ + X = [["A", 1, "A"], ["B", 0, "C"], ["C", 2, "B"]] + column_name = ["col_A", "col_B", "col_C"] + X = _convert_container(X, array_type, columns_name=column_name) + y = np.array([1.2, 0.5, 0.45]).T + + preprocessor = make_column_transformer((OneHotEncoder(), categorical_features)) + model = make_pipeline(preprocessor, LinearRegression()) + model.fit(X, y) + + err_msg = ( + "resolution of the computed grid is less than the minimum number of categories" + ) + with pytest.raises(ValueError, match=err_msg): + PartialDependenceDisplay.from_estimator( + model, + X, + features=["col_C"], + feature_names=column_name, + categorical_features=categorical_features, + grid_resolution=2, + ) + + +@pytest.mark.parametrize("kind", ["individual", "average", "both"]) +@pytest.mark.parametrize("centered", [True, False]) +def test_partial_dependence_plot_limits_one_way( + pyplot, clf_diabetes, diabetes, kind, centered +): + """Check that the PD limit on the plots are properly set on one-way plots.""" + disp = PartialDependenceDisplay.from_estimator( + clf_diabetes, + diabetes.data, + features=(0, 1), + kind=kind, + grid_resolution=25, + feature_names=diabetes.feature_names, + ) + + range_pd = np.array([-1, 1], dtype=np.float64) + for pd in disp.pd_results: + if "average" in pd: + pd["average"][...] = range_pd[1] + pd["average"][0, 0] = range_pd[0] + if "individual" in pd: + pd["individual"][...] = range_pd[1] + pd["individual"][0, 0, 0] = range_pd[0] + + disp.plot(centered=centered) + # check that we anchor to zero x-axis when centering + y_lim = range_pd - range_pd[0] if centered else range_pd + padding = 0.05 * (y_lim[1] - y_lim[0]) + y_lim[0] -= padding + y_lim[1] += padding + for ax in disp.axes_.ravel(): + assert_allclose(ax.get_ylim(), y_lim) + + +@pytest.mark.parametrize("centered", [True, False]) +def test_partial_dependence_plot_limits_two_way( + pyplot, clf_diabetes, diabetes, centered +): + """Check that the PD limit on the plots are properly set on two-way plots.""" + disp = PartialDependenceDisplay.from_estimator( + clf_diabetes, + diabetes.data, + features=[(0, 1)], + kind="average", + grid_resolution=25, + feature_names=diabetes.feature_names, + ) + + range_pd = np.array([-1, 1], dtype=np.float64) + for pd in disp.pd_results: + pd["average"][...] 
= range_pd[1] + pd["average"][0, 0] = range_pd[0] + + disp.plot(centered=centered) + contours = disp.contours_[0, 0] + levels = range_pd - range_pd[0] if centered else range_pd + + padding = 0.05 * (levels[1] - levels[0]) + levels[0] -= padding + levels[1] += padding + expect_levels = np.linspace(*levels, num=8) + assert_allclose(contours.levels, expect_levels) + + +def test_partial_dependence_kind_list( + pyplot, + clf_diabetes, + diabetes, +): + """Check that we can provide a list of strings to kind parameter.""" + matplotlib = pytest.importorskip("matplotlib") + + disp = PartialDependenceDisplay.from_estimator( + clf_diabetes, + diabetes.data, + features=[0, 2, (1, 2)], + grid_resolution=20, + kind=["both", "both", "average"], + ) + + for idx in [0, 1]: + assert all( + [ + isinstance(line, matplotlib.lines.Line2D) + for line in disp.lines_[0, idx].ravel() + ] + ) + assert disp.contours_[0, idx] is None + + assert disp.contours_[0, 2] is not None + assert all([line is None for line in disp.lines_[0, 2].ravel()]) + + +@pytest.mark.parametrize( + "features, kind", + [ + ([0, 2, (1, 2)], "individual"), + ([0, 2, (1, 2)], "both"), + ([(0, 1), (0, 2), (1, 2)], "individual"), + ([(0, 1), (0, 2), (1, 2)], "both"), + ([0, 2, (1, 2)], ["individual", "individual", "individual"]), + ([0, 2, (1, 2)], ["both", "both", "both"]), + ], +) +def test_partial_dependence_kind_error( + pyplot, + clf_diabetes, + diabetes, + features, + kind, +): + """Check that we raise an informative error when 2-way PD is requested + together with 1-way PD/ICE""" + warn_msg = ( + "ICE plot cannot be rendered for 2-way feature interactions. 2-way " + "feature interactions mandates PD plots using the 'average' kind" + ) + with pytest.raises(ValueError, match=warn_msg): + PartialDependenceDisplay.from_estimator( + clf_diabetes, + diabetes.data, + features=features, + grid_resolution=20, + kind=kind, + ) + + +@pytest.mark.filterwarnings("ignore:A Bunch will be returned") +@pytest.mark.parametrize( + "line_kw, pd_line_kw, ice_lines_kw, expected_colors", + [ + ({"color": "r"}, {"color": "g"}, {"color": "b"}, ("g", "b")), + (None, {"color": "g"}, {"color": "b"}, ("g", "b")), + ({"color": "r"}, None, {"color": "b"}, ("r", "b")), + ({"color": "r"}, {"color": "g"}, None, ("g", "r")), + ({"color": "r"}, None, None, ("r", "r")), + ({"color": "r"}, {"linestyle": "--"}, {"linestyle": "-."}, ("r", "r")), + ], +) +def test_plot_partial_dependence_lines_kw( + pyplot, + clf_diabetes, + diabetes, + line_kw, + pd_line_kw, + ice_lines_kw, + expected_colors, +): + """Check that passing `pd_line_kw` and `ice_lines_kw` will act on the + specific lines in the plot. 
+ """ + + disp = PartialDependenceDisplay.from_estimator( + clf_diabetes, + diabetes.data, + [0, 2], + grid_resolution=20, + feature_names=diabetes.feature_names, + n_cols=2, + kind="both", + line_kw=line_kw, + pd_line_kw=pd_line_kw, + ice_lines_kw=ice_lines_kw, + ) + + line = disp.lines_[0, 0, -1] + assert line.get_color() == expected_colors[0] + if pd_line_kw is not None and "linestyle" in pd_line_kw: + assert line.get_linestyle() == pd_line_kw["linestyle"] + else: + assert line.get_linestyle() == "--" + + line = disp.lines_[0, 0, 0] + assert line.get_color() == expected_colors[1] + if ice_lines_kw is not None and "linestyle" in ice_lines_kw: + assert line.get_linestyle() == ice_lines_kw["linestyle"] + else: + assert line.get_linestyle() == "-" + + +def test_partial_dependence_display_wrong_len_kind( + pyplot, + clf_diabetes, + diabetes, +): + """Check that we raise an error when `kind` is a list with a wrong length. + + This case can only be triggered using the `PartialDependenceDisplay.from_estimator` + method. + """ + disp = PartialDependenceDisplay.from_estimator( + clf_diabetes, + diabetes.data, + features=[0, 2], + grid_resolution=20, + kind="average", # len(kind) != len(features) + ) + + # alter `kind` to be a list with a length different from length of `features` + disp.kind = ["average"] + err_msg = ( + r"When `kind` is provided as a list of strings, it should contain as many" + r" elements as `features`. `kind` contains 1 element\(s\) and `features`" + r" contains 2 element\(s\)." + ) + with pytest.raises(ValueError, match=err_msg): + disp.plot() + + +@pytest.mark.parametrize( + "kind", + ["individual", "both", "average", ["average", "both"], ["individual", "both"]], +) +def test_partial_dependence_display_kind_centered_interaction( + pyplot, + kind, + clf_diabetes, + diabetes, +): + """Check that we properly center ICE and PD when passing kind as a string and as a + list.""" + disp = PartialDependenceDisplay.from_estimator( + clf_diabetes, + diabetes.data, + [0, 1], + kind=kind, + centered=True, + subsample=5, + ) + + assert all([ln._y[0] == 0.0 for ln in disp.lines_.ravel() if ln is not None]) + + +def test_partial_dependence_display_with_constant_sample_weight( + pyplot, + clf_diabetes, + diabetes, +): + """Check that the utilization of a constant sample weight maintains the + standard behavior. + """ + disp = PartialDependenceDisplay.from_estimator( + clf_diabetes, + diabetes.data, + [0, 1], + kind="average", + method="brute", + ) + + sample_weight = np.ones_like(diabetes.target) + disp_sw = PartialDependenceDisplay.from_estimator( + clf_diabetes, + diabetes.data, + [0, 1], + sample_weight=sample_weight, + kind="average", + method="brute", + ) + + assert np.array_equal( + disp.pd_results[0]["average"], disp_sw.pd_results[0]["average"] + ) + + +def test_subclass_named_constructors_return_type_is_subclass( + pyplot, diabetes, clf_diabetes +): + """Check that named constructors return the correct type when subclassed. 
+ + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/pull/27675 + """ + + class SubclassOfDisplay(PartialDependenceDisplay): + pass + + curve = SubclassOfDisplay.from_estimator( + clf_diabetes, + diabetes.data, + [0, 2, (0, 2)], + ) + + assert isinstance(curve, SubclassOfDisplay) diff --git a/venv/lib/python3.10/site-packages/sklearn/inspection/tests/__init__.py b/venv/lib/python3.10/site-packages/sklearn/inspection/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/sklearn/inspection/tests/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/inspection/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aeeb649b2cd6d77b34af34e95c694ecb7d771c0c Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/inspection/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/inspection/tests/__pycache__/test_partial_dependence.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/inspection/tests/__pycache__/test_partial_dependence.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a893f26626704e8b3628267e769263830507bd83 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/inspection/tests/__pycache__/test_partial_dependence.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/inspection/tests/__pycache__/test_pd_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/inspection/tests/__pycache__/test_pd_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a7e52566d7e4a754c4147f0b8ab34dc558967a25 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/inspection/tests/__pycache__/test_pd_utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/inspection/tests/__pycache__/test_permutation_importance.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/inspection/tests/__pycache__/test_permutation_importance.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fe6f2d293845efdea26899d02e58e4095a08f7d2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/inspection/tests/__pycache__/test_permutation_importance.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/inspection/tests/test_partial_dependence.py b/venv/lib/python3.10/site-packages/sklearn/inspection/tests/test_partial_dependence.py new file mode 100644 index 0000000000000000000000000000000000000000..0336dc4b827fea3510560dd406dab948da581817 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/inspection/tests/test_partial_dependence.py @@ -0,0 +1,958 @@ +""" +Testing for the partial dependence module. 
+""" +import warnings + +import numpy as np +import pytest + +import sklearn +from sklearn.base import BaseEstimator, ClassifierMixin, clone, is_regressor +from sklearn.cluster import KMeans +from sklearn.compose import make_column_transformer +from sklearn.datasets import load_iris, make_classification, make_regression +from sklearn.dummy import DummyClassifier +from sklearn.ensemble import ( + GradientBoostingClassifier, + GradientBoostingRegressor, + HistGradientBoostingClassifier, + HistGradientBoostingRegressor, + RandomForestRegressor, +) +from sklearn.exceptions import NotFittedError +from sklearn.inspection import partial_dependence +from sklearn.inspection._partial_dependence import ( + _grid_from_X, + _partial_dependence_brute, + _partial_dependence_recursion, +) +from sklearn.linear_model import LinearRegression, LogisticRegression, MultiTaskLasso +from sklearn.metrics import r2_score +from sklearn.pipeline import make_pipeline +from sklearn.preprocessing import ( + PolynomialFeatures, + RobustScaler, + StandardScaler, + scale, +) +from sklearn.tree import DecisionTreeRegressor +from sklearn.tree.tests.test_tree import assert_is_subtree +from sklearn.utils import _IS_32BIT +from sklearn.utils._testing import assert_allclose, assert_array_equal +from sklearn.utils.validation import check_random_state + +# toy sample +X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]] +y = [-1, -1, -1, 1, 1, 1] + + +# (X, y), n_targets <-- as expected in the output of partial_dep() +binary_classification_data = (make_classification(n_samples=50, random_state=0), 1) +multiclass_classification_data = ( + make_classification( + n_samples=50, n_classes=3, n_clusters_per_class=1, random_state=0 + ), + 3, +) +regression_data = (make_regression(n_samples=50, random_state=0), 1) +multioutput_regression_data = ( + make_regression(n_samples=50, n_targets=2, random_state=0), + 2, +) + +# iris +iris = load_iris() + + +@pytest.mark.parametrize( + "Estimator, method, data", + [ + (GradientBoostingClassifier, "auto", binary_classification_data), + (GradientBoostingClassifier, "auto", multiclass_classification_data), + (GradientBoostingClassifier, "brute", binary_classification_data), + (GradientBoostingClassifier, "brute", multiclass_classification_data), + (GradientBoostingRegressor, "auto", regression_data), + (GradientBoostingRegressor, "brute", regression_data), + (DecisionTreeRegressor, "brute", regression_data), + (LinearRegression, "brute", regression_data), + (LinearRegression, "brute", multioutput_regression_data), + (LogisticRegression, "brute", binary_classification_data), + (LogisticRegression, "brute", multiclass_classification_data), + (MultiTaskLasso, "brute", multioutput_regression_data), + ], +) +@pytest.mark.parametrize("grid_resolution", (5, 10)) +@pytest.mark.parametrize("features", ([1], [1, 2])) +@pytest.mark.parametrize("kind", ("average", "individual", "both")) +def test_output_shape(Estimator, method, data, grid_resolution, features, kind): + # Check that partial_dependence has consistent output shape for different + # kinds of estimators: + # - classifiers with binary and multiclass settings + # - regressors + # - multi-task regressors + + est = Estimator() + if hasattr(est, "n_estimators"): + est.set_params(n_estimators=2) # speed-up computations + + # n_target corresponds to the number of classes (1 for binary classif) or + # the number of tasks / outputs in multi task settings. It's equal to 1 for + # classical regression_data. 
+ (X, y), n_targets = data + n_instances = X.shape[0] + + est.fit(X, y) + result = partial_dependence( + est, + X=X, + features=features, + method=method, + kind=kind, + grid_resolution=grid_resolution, + ) + pdp, axes = result, result["grid_values"] + + expected_pdp_shape = (n_targets, *[grid_resolution for _ in range(len(features))]) + expected_ice_shape = ( + n_targets, + n_instances, + *[grid_resolution for _ in range(len(features))], + ) + if kind == "average": + assert pdp.average.shape == expected_pdp_shape + elif kind == "individual": + assert pdp.individual.shape == expected_ice_shape + else: # 'both' + assert pdp.average.shape == expected_pdp_shape + assert pdp.individual.shape == expected_ice_shape + + expected_axes_shape = (len(features), grid_resolution) + assert axes is not None + assert np.asarray(axes).shape == expected_axes_shape + + +def test_grid_from_X(): + # tests for _grid_from_X: sanity check for output, and for shapes. + + # Make sure that the grid is a cartesian product of the input (it will use + # the unique values instead of the percentiles) + percentiles = (0.05, 0.95) + grid_resolution = 100 + is_categorical = [False, False] + X = np.asarray([[1, 2], [3, 4]]) + grid, axes = _grid_from_X(X, percentiles, is_categorical, grid_resolution) + assert_array_equal(grid, [[1, 2], [1, 4], [3, 2], [3, 4]]) + assert_array_equal(axes, X.T) + + # test shapes of returned objects depending on the number of unique values + # for a feature. + rng = np.random.RandomState(0) + grid_resolution = 15 + + # n_unique_values > grid_resolution + X = rng.normal(size=(20, 2)) + grid, axes = _grid_from_X( + X, percentiles, is_categorical, grid_resolution=grid_resolution + ) + assert grid.shape == (grid_resolution * grid_resolution, X.shape[1]) + assert np.asarray(axes).shape == (2, grid_resolution) + + # n_unique_values < grid_resolution, will use actual values + n_unique_values = 12 + X[n_unique_values - 1 :, 0] = 12345 + rng.shuffle(X) # just to make sure the order is irrelevant + grid, axes = _grid_from_X( + X, percentiles, is_categorical, grid_resolution=grid_resolution + ) + assert grid.shape == (n_unique_values * grid_resolution, X.shape[1]) + # axes is a list of arrays of different shapes + assert axes[0].shape == (n_unique_values,) + assert axes[1].shape == (grid_resolution,) + + +@pytest.mark.parametrize( + "grid_resolution", + [ + 2, # since n_categories > 2, we should not use quantiles resampling + 100, + ], +) +def test_grid_from_X_with_categorical(grid_resolution): + """Check that `_grid_from_X` always samples from the categories and does not + depend on the percentiles. + """ + pd = pytest.importorskip("pandas") + percentiles = (0.05, 0.95) + is_categorical = [True] + X = pd.DataFrame({"cat_feature": ["A", "B", "C", "A", "B", "D", "E"]}) + grid, axes = _grid_from_X( + X, percentiles, is_categorical, grid_resolution=grid_resolution + ) + assert grid.shape == (5, X.shape[1]) + assert axes[0].shape == (5,) + + +@pytest.mark.parametrize("grid_resolution", [3, 100]) +def test_grid_from_X_heterogeneous_type(grid_resolution): + """Check that `_grid_from_X` always samples from the categories and does not + depend on the percentiles. 
+ """ + pd = pytest.importorskip("pandas") + percentiles = (0.05, 0.95) + is_categorical = [True, False] + X = pd.DataFrame( + { + "cat": ["A", "B", "C", "A", "B", "D", "E", "A", "B", "D"], + "num": [1, 1, 1, 2, 5, 6, 6, 6, 6, 8], + } + ) + nunique = X.nunique() + + grid, axes = _grid_from_X( + X, percentiles, is_categorical, grid_resolution=grid_resolution + ) + if grid_resolution == 3: + assert grid.shape == (15, 2) + assert axes[0].shape[0] == nunique["num"] + assert axes[1].shape[0] == grid_resolution + else: + assert grid.shape == (25, 2) + assert axes[0].shape[0] == nunique["cat"] + assert axes[1].shape[0] == nunique["cat"] + + +@pytest.mark.parametrize( + "grid_resolution, percentiles, err_msg", + [ + (2, (0, 0.0001), "percentiles are too close"), + (100, (1, 2, 3, 4), "'percentiles' must be a sequence of 2 elements"), + (100, 12345, "'percentiles' must be a sequence of 2 elements"), + (100, (-1, 0.95), r"'percentiles' values must be in \[0, 1\]"), + (100, (0.05, 2), r"'percentiles' values must be in \[0, 1\]"), + (100, (0.9, 0.1), r"percentiles\[0\] must be strictly less than"), + (1, (0.05, 0.95), "'grid_resolution' must be strictly greater than 1"), + ], +) +def test_grid_from_X_error(grid_resolution, percentiles, err_msg): + X = np.asarray([[1, 2], [3, 4]]) + is_categorical = [False] + with pytest.raises(ValueError, match=err_msg): + _grid_from_X(X, percentiles, is_categorical, grid_resolution) + + +@pytest.mark.parametrize("target_feature", range(5)) +@pytest.mark.parametrize( + "est, method", + [ + (LinearRegression(), "brute"), + (GradientBoostingRegressor(random_state=0), "brute"), + (GradientBoostingRegressor(random_state=0), "recursion"), + (HistGradientBoostingRegressor(random_state=0), "brute"), + (HistGradientBoostingRegressor(random_state=0), "recursion"), + ], +) +def test_partial_dependence_helpers(est, method, target_feature): + # Check that what is returned by _partial_dependence_brute or + # _partial_dependence_recursion is equivalent to manually setting a target + # feature to a given value, and computing the average prediction over all + # samples. + # This also checks that the brute and recursion methods give the same + # output. + # Note that even on the trainset, the brute and the recursion methods + # aren't always strictly equivalent, in particular when the slow method + # generates unrealistic samples that have low mass in the joint + # distribution of the input features, and when some of the features are + # dependent. Hence the high tolerance on the checks. + + X, y = make_regression(random_state=0, n_features=5, n_informative=5) + # The 'init' estimator for GBDT (here the average prediction) isn't taken + # into account with the recursion method, for technical reasons. We set + # the mean to 0 to that this 'bug' doesn't have any effect. 
+ y = y - y.mean() + est.fit(X, y) + + # target feature will be set to .5 and then to 123 + features = np.array([target_feature], dtype=np.int32) + grid = np.array([[0.5], [123]]) + + if method == "brute": + pdp, predictions = _partial_dependence_brute( + est, grid, features, X, response_method="auto" + ) + else: + pdp = _partial_dependence_recursion(est, grid, features) + + mean_predictions = [] + for val in (0.5, 123): + X_ = X.copy() + X_[:, target_feature] = val + mean_predictions.append(est.predict(X_).mean()) + + pdp = pdp[0] # (shape is (1, 2) so make it (2,)) + + # allow for greater margin for error with recursion method + rtol = 1e-1 if method == "recursion" else 1e-3 + assert np.allclose(pdp, mean_predictions, rtol=rtol) + + +@pytest.mark.parametrize("seed", range(1)) +def test_recursion_decision_tree_vs_forest_and_gbdt(seed): + # Make sure that the recursion method gives the same results on a + # DecisionTreeRegressor and a GradientBoostingRegressor or a + # RandomForestRegressor with 1 tree and equivalent parameters. + + rng = np.random.RandomState(seed) + + # Purely random dataset to avoid correlated features + n_samples = 1000 + n_features = 5 + X = rng.randn(n_samples, n_features) + y = rng.randn(n_samples) * 10 + + # The 'init' estimator for GBDT (here the average prediction) isn't taken + # into account with the recursion method, for technical reasons. We set + # the mean to 0 to that this 'bug' doesn't have any effect. + y = y - y.mean() + + # set max_depth not too high to avoid splits with same gain but different + # features + max_depth = 5 + + tree_seed = 0 + forest = RandomForestRegressor( + n_estimators=1, + max_features=None, + bootstrap=False, + max_depth=max_depth, + random_state=tree_seed, + ) + # The forest will use ensemble.base._set_random_states to set the + # random_state of the tree sub-estimator. We simulate this here to have + # equivalent estimators. + equiv_random_state = check_random_state(tree_seed).randint(np.iinfo(np.int32).max) + gbdt = GradientBoostingRegressor( + n_estimators=1, + learning_rate=1, + criterion="squared_error", + max_depth=max_depth, + random_state=equiv_random_state, + ) + tree = DecisionTreeRegressor(max_depth=max_depth, random_state=equiv_random_state) + + forest.fit(X, y) + gbdt.fit(X, y) + tree.fit(X, y) + + # sanity check: if the trees aren't the same, the PD values won't be equal + try: + assert_is_subtree(tree.tree_, gbdt[0, 0].tree_) + assert_is_subtree(tree.tree_, forest[0].tree_) + except AssertionError: + # For some reason the trees aren't exactly equal on 32bits, so the PDs + # cannot be equal either. 
See + # https://github.com/scikit-learn/scikit-learn/issues/8853 + assert _IS_32BIT, "this should only fail on 32 bit platforms" + return + + grid = rng.randn(50).reshape(-1, 1) + for f in range(n_features): + features = np.array([f], dtype=np.int32) + + pdp_forest = _partial_dependence_recursion(forest, grid, features) + pdp_gbdt = _partial_dependence_recursion(gbdt, grid, features) + pdp_tree = _partial_dependence_recursion(tree, grid, features) + + np.testing.assert_allclose(pdp_gbdt, pdp_tree) + np.testing.assert_allclose(pdp_forest, pdp_tree) + + +@pytest.mark.parametrize( + "est", + ( + GradientBoostingClassifier(random_state=0), + HistGradientBoostingClassifier(random_state=0), + ), +) +@pytest.mark.parametrize("target_feature", (0, 1, 2, 3, 4, 5)) +def test_recursion_decision_function(est, target_feature): + # Make sure the recursion method (implicitly uses decision_function) has + # the same result as using brute method with + # response_method=decision_function + + X, y = make_classification(n_classes=2, n_clusters_per_class=1, random_state=1) + assert np.mean(y) == 0.5 # make sure the init estimator predicts 0 anyway + + est.fit(X, y) + + preds_1 = partial_dependence( + est, + X, + [target_feature], + response_method="decision_function", + method="recursion", + kind="average", + ) + preds_2 = partial_dependence( + est, + X, + [target_feature], + response_method="decision_function", + method="brute", + kind="average", + ) + + assert_allclose(preds_1["average"], preds_2["average"], atol=1e-7) + + +@pytest.mark.parametrize( + "est", + ( + LinearRegression(), + GradientBoostingRegressor(random_state=0), + HistGradientBoostingRegressor( + random_state=0, min_samples_leaf=1, max_leaf_nodes=None, max_iter=1 + ), + DecisionTreeRegressor(random_state=0), + ), +) +@pytest.mark.parametrize("power", (1, 2)) +def test_partial_dependence_easy_target(est, power): + # If the target y only depends on one feature in an obvious way (linear or + # quadratic) then the partial dependence for that feature should reflect + # it. + # We here fit a linear regression_data model (with polynomial features if + # needed) and compute r_squared to check that the partial dependence + # correctly reflects the target. 
+ + rng = np.random.RandomState(0) + n_samples = 200 + target_variable = 2 + X = rng.normal(size=(n_samples, 5)) + y = X[:, target_variable] ** power + + est.fit(X, y) + + pdp = partial_dependence( + est, features=[target_variable], X=X, grid_resolution=1000, kind="average" + ) + + new_X = pdp["grid_values"][0].reshape(-1, 1) + new_y = pdp["average"][0] + # add polynomial features if needed + new_X = PolynomialFeatures(degree=power).fit_transform(new_X) + + lr = LinearRegression().fit(new_X, new_y) + r2 = r2_score(new_y, lr.predict(new_X)) + + assert r2 > 0.99 + + +@pytest.mark.parametrize( + "Estimator", + ( + sklearn.tree.DecisionTreeClassifier, + sklearn.tree.ExtraTreeClassifier, + sklearn.ensemble.ExtraTreesClassifier, + sklearn.neighbors.KNeighborsClassifier, + sklearn.neighbors.RadiusNeighborsClassifier, + sklearn.ensemble.RandomForestClassifier, + ), +) +def test_multiclass_multioutput(Estimator): + # Make sure error is raised for multiclass-multioutput classifiers + + # make multiclass-multioutput dataset + X, y = make_classification(n_classes=3, n_clusters_per_class=1, random_state=0) + y = np.array([y, y]).T + + est = Estimator() + est.fit(X, y) + + with pytest.raises( + ValueError, match="Multiclass-multioutput estimators are not supported" + ): + partial_dependence(est, X, [0]) + + +class NoPredictProbaNoDecisionFunction(ClassifierMixin, BaseEstimator): + def fit(self, X, y): + # simulate that we have some classes + self.classes_ = [0, 1] + return self + + +@pytest.mark.filterwarnings("ignore:A Bunch will be returned") +@pytest.mark.parametrize( + "estimator, params, err_msg", + [ + ( + KMeans(random_state=0, n_init="auto"), + {"features": [0]}, + "'estimator' must be a fitted regressor or classifier", + ), + ( + LinearRegression(), + {"features": [0], "response_method": "predict_proba"}, + "The response_method parameter is ignored for regressors", + ), + ( + GradientBoostingClassifier(random_state=0), + { + "features": [0], + "response_method": "predict_proba", + "method": "recursion", + }, + "'recursion' method, the response_method must be 'decision_function'", + ), + ( + GradientBoostingClassifier(random_state=0), + {"features": [0], "response_method": "predict_proba", "method": "auto"}, + "'recursion' method, the response_method must be 'decision_function'", + ), + ( + LinearRegression(), + {"features": [0], "method": "recursion", "kind": "individual"}, + "The 'recursion' method only applies when 'kind' is set to 'average'", + ), + ( + LinearRegression(), + {"features": [0], "method": "recursion", "kind": "both"}, + "The 'recursion' method only applies when 'kind' is set to 'average'", + ), + ( + LinearRegression(), + {"features": [0], "method": "recursion"}, + "Only the following estimators support the 'recursion' method:", + ), + ], +) +def test_partial_dependence_error(estimator, params, err_msg): + X, y = make_classification(random_state=0) + estimator.fit(X, y) + + with pytest.raises(ValueError, match=err_msg): + partial_dependence(estimator, X, **params) + + +@pytest.mark.parametrize( + "estimator", [LinearRegression(), GradientBoostingClassifier(random_state=0)] +) +@pytest.mark.parametrize("features", [-1, 10000]) +def test_partial_dependence_unknown_feature_indices(estimator, features): + X, y = make_classification(random_state=0) + estimator.fit(X, y) + + err_msg = "all features must be in" + with pytest.raises(ValueError, match=err_msg): + partial_dependence(estimator, X, [features]) + + +@pytest.mark.parametrize( + "estimator", [LinearRegression(), 
GradientBoostingClassifier(random_state=0)] +) +def test_partial_dependence_unknown_feature_string(estimator): + pd = pytest.importorskip("pandas") + X, y = make_classification(random_state=0) + df = pd.DataFrame(X) + estimator.fit(df, y) + + features = ["random"] + err_msg = "A given column is not a column of the dataframe" + with pytest.raises(ValueError, match=err_msg): + partial_dependence(estimator, df, features) + + +@pytest.mark.parametrize( + "estimator", [LinearRegression(), GradientBoostingClassifier(random_state=0)] +) +def test_partial_dependence_X_list(estimator): + # check that array-like objects are accepted + X, y = make_classification(random_state=0) + estimator.fit(X, y) + partial_dependence(estimator, list(X), [0], kind="average") + + +def test_warning_recursion_non_constant_init(): + # make sure that passing a non-constant init parameter to a GBDT and using + # recursion method yields a warning. + + gbc = GradientBoostingClassifier(init=DummyClassifier(), random_state=0) + gbc.fit(X, y) + + with pytest.warns( + UserWarning, match="Using recursion method with a non-constant init predictor" + ): + partial_dependence(gbc, X, [0], method="recursion", kind="average") + + with pytest.warns( + UserWarning, match="Using recursion method with a non-constant init predictor" + ): + partial_dependence(gbc, X, [0], method="recursion", kind="average") + + +def test_partial_dependence_sample_weight_of_fitted_estimator(): + # Test near perfect correlation between partial dependence and diagonal + # when sample weights emphasize y = x predictions + # non-regression test for #13193 + # TODO: extend to HistGradientBoosting once sample_weight is supported + N = 1000 + rng = np.random.RandomState(123456) + mask = rng.randint(2, size=N, dtype=bool) + + x = rng.rand(N) + # set y = x on mask and y = -x outside + y = x.copy() + y[~mask] = -y[~mask] + X = np.c_[mask, x] + # sample weights to emphasize data points where y = x + sample_weight = np.ones(N) + sample_weight[mask] = 1000.0 + + clf = GradientBoostingRegressor(n_estimators=10, random_state=1) + clf.fit(X, y, sample_weight=sample_weight) + + pdp = partial_dependence(clf, X, features=[1], kind="average") + + assert np.corrcoef(pdp["average"], pdp["grid_values"])[0, 1] > 0.99 + + +def test_hist_gbdt_sw_not_supported(): + # TODO: remove/fix when PDP supports HGBT with sample weights + clf = HistGradientBoostingRegressor(random_state=1) + clf.fit(X, y, sample_weight=np.ones(len(X))) + + with pytest.raises( + NotImplementedError, match="does not support partial dependence" + ): + partial_dependence(clf, X, features=[1]) + + +def test_partial_dependence_pipeline(): + # check that the partial dependence support pipeline + iris = load_iris() + + scaler = StandardScaler() + clf = DummyClassifier(random_state=42) + pipe = make_pipeline(scaler, clf) + + clf.fit(scaler.fit_transform(iris.data), iris.target) + pipe.fit(iris.data, iris.target) + + features = 0 + pdp_pipe = partial_dependence( + pipe, iris.data, features=[features], grid_resolution=10, kind="average" + ) + pdp_clf = partial_dependence( + clf, + scaler.transform(iris.data), + features=[features], + grid_resolution=10, + kind="average", + ) + assert_allclose(pdp_pipe["average"], pdp_clf["average"]) + assert_allclose( + pdp_pipe["grid_values"][0], + pdp_clf["grid_values"][0] * scaler.scale_[features] + scaler.mean_[features], + ) + + +@pytest.mark.parametrize( + "estimator", + [ + LogisticRegression(max_iter=1000, random_state=0), + GradientBoostingClassifier(random_state=0, 
n_estimators=5), + ], + ids=["estimator-brute", "estimator-recursion"], +) +@pytest.mark.parametrize( + "preprocessor", + [ + None, + make_column_transformer( + (StandardScaler(), [iris.feature_names[i] for i in (0, 2)]), + (RobustScaler(), [iris.feature_names[i] for i in (1, 3)]), + ), + make_column_transformer( + (StandardScaler(), [iris.feature_names[i] for i in (0, 2)]), + remainder="passthrough", + ), + ], + ids=["None", "column-transformer", "column-transformer-passthrough"], +) +@pytest.mark.parametrize( + "features", + [[0, 2], [iris.feature_names[i] for i in (0, 2)]], + ids=["features-integer", "features-string"], +) +def test_partial_dependence_dataframe(estimator, preprocessor, features): + # check that the partial dependence support dataframe and pipeline + # including a column transformer + pd = pytest.importorskip("pandas") + df = pd.DataFrame(scale(iris.data), columns=iris.feature_names) + + pipe = make_pipeline(preprocessor, estimator) + pipe.fit(df, iris.target) + pdp_pipe = partial_dependence( + pipe, df, features=features, grid_resolution=10, kind="average" + ) + + # the column transformer will reorder the column when transforming + # we mixed the index to be sure that we are computing the partial + # dependence of the right columns + if preprocessor is not None: + X_proc = clone(preprocessor).fit_transform(df) + features_clf = [0, 1] + else: + X_proc = df + features_clf = [0, 2] + + clf = clone(estimator).fit(X_proc, iris.target) + pdp_clf = partial_dependence( + clf, + X_proc, + features=features_clf, + method="brute", + grid_resolution=10, + kind="average", + ) + + assert_allclose(pdp_pipe["average"], pdp_clf["average"]) + if preprocessor is not None: + scaler = preprocessor.named_transformers_["standardscaler"] + assert_allclose( + pdp_pipe["grid_values"][1], + pdp_clf["grid_values"][1] * scaler.scale_[1] + scaler.mean_[1], + ) + else: + assert_allclose(pdp_pipe["grid_values"][1], pdp_clf["grid_values"][1]) + + +@pytest.mark.parametrize( + "features, expected_pd_shape", + [ + (0, (3, 10)), + (iris.feature_names[0], (3, 10)), + ([0, 2], (3, 10, 10)), + ([iris.feature_names[i] for i in (0, 2)], (3, 10, 10)), + ([True, False, True, False], (3, 10, 10)), + ], + ids=["scalar-int", "scalar-str", "list-int", "list-str", "mask"], +) +def test_partial_dependence_feature_type(features, expected_pd_shape): + # check all possible features type supported in PDP + pd = pytest.importorskip("pandas") + df = pd.DataFrame(iris.data, columns=iris.feature_names) + + preprocessor = make_column_transformer( + (StandardScaler(), [iris.feature_names[i] for i in (0, 2)]), + (RobustScaler(), [iris.feature_names[i] for i in (1, 3)]), + ) + pipe = make_pipeline( + preprocessor, LogisticRegression(max_iter=1000, random_state=0) + ) + pipe.fit(df, iris.target) + pdp_pipe = partial_dependence( + pipe, df, features=features, grid_resolution=10, kind="average" + ) + assert pdp_pipe["average"].shape == expected_pd_shape + assert len(pdp_pipe["grid_values"]) == len(pdp_pipe["average"].shape) - 1 + + +@pytest.mark.parametrize( + "estimator", + [ + LinearRegression(), + LogisticRegression(), + GradientBoostingRegressor(), + GradientBoostingClassifier(), + ], +) +def test_partial_dependence_unfitted(estimator): + X = iris.data + preprocessor = make_column_transformer( + (StandardScaler(), [0, 2]), (RobustScaler(), [1, 3]) + ) + pipe = make_pipeline(preprocessor, estimator) + with pytest.raises(NotFittedError, match="is not fitted yet"): + partial_dependence(pipe, X, features=[0, 2], grid_resolution=10) + 
with pytest.raises(NotFittedError, match="is not fitted yet"): + partial_dependence(estimator, X, features=[0, 2], grid_resolution=10) + + +@pytest.mark.parametrize( + "Estimator, data", + [ + (LinearRegression, multioutput_regression_data), + (LogisticRegression, binary_classification_data), + ], +) +def test_kind_average_and_average_of_individual(Estimator, data): + est = Estimator() + (X, y), n_targets = data + est.fit(X, y) + + pdp_avg = partial_dependence(est, X=X, features=[1, 2], kind="average") + pdp_ind = partial_dependence(est, X=X, features=[1, 2], kind="individual") + avg_ind = np.mean(pdp_ind["individual"], axis=1) + assert_allclose(avg_ind, pdp_avg["average"]) + + +@pytest.mark.parametrize( + "Estimator, data", + [ + (LinearRegression, multioutput_regression_data), + (LogisticRegression, binary_classification_data), + ], +) +def test_partial_dependence_kind_individual_ignores_sample_weight(Estimator, data): + """Check that `sample_weight` does not have any effect on reported ICE.""" + est = Estimator() + (X, y), n_targets = data + sample_weight = np.arange(X.shape[0]) + est.fit(X, y) + + pdp_nsw = partial_dependence(est, X=X, features=[1, 2], kind="individual") + pdp_sw = partial_dependence( + est, X=X, features=[1, 2], kind="individual", sample_weight=sample_weight + ) + assert_allclose(pdp_nsw["individual"], pdp_sw["individual"]) + assert_allclose(pdp_nsw["grid_values"], pdp_sw["grid_values"]) + + +@pytest.mark.parametrize( + "estimator", + [ + LinearRegression(), + LogisticRegression(), + RandomForestRegressor(), + GradientBoostingClassifier(), + ], +) +@pytest.mark.parametrize("non_null_weight_idx", [0, 1, -1]) +def test_partial_dependence_non_null_weight_idx(estimator, non_null_weight_idx): + """Check that if we pass a `sample_weight` of zeros with only one index with + sample weight equals one, then the average `partial_dependence` with this + `sample_weight` is equal to the individual `partial_dependence` of the + corresponding index. 
+ """ + X, y = iris.data, iris.target + preprocessor = make_column_transformer( + (StandardScaler(), [0, 2]), (RobustScaler(), [1, 3]) + ) + pipe = make_pipeline(preprocessor, estimator).fit(X, y) + + sample_weight = np.zeros_like(y) + sample_weight[non_null_weight_idx] = 1 + pdp_sw = partial_dependence( + pipe, + X, + [2, 3], + kind="average", + sample_weight=sample_weight, + grid_resolution=10, + ) + pdp_ind = partial_dependence(pipe, X, [2, 3], kind="individual", grid_resolution=10) + output_dim = 1 if is_regressor(pipe) else len(np.unique(y)) + for i in range(output_dim): + assert_allclose( + pdp_ind["individual"][i][non_null_weight_idx], + pdp_sw["average"][i], + ) + + +@pytest.mark.parametrize( + "Estimator, data", + [ + (LinearRegression, multioutput_regression_data), + (LogisticRegression, binary_classification_data), + ], +) +def test_partial_dependence_equivalence_equal_sample_weight(Estimator, data): + """Check that `sample_weight=None` is equivalent to having equal weights.""" + + est = Estimator() + (X, y), n_targets = data + est.fit(X, y) + + sample_weight, params = None, {"X": X, "features": [1, 2], "kind": "average"} + pdp_sw_none = partial_dependence(est, **params, sample_weight=sample_weight) + sample_weight = np.ones(len(y)) + pdp_sw_unit = partial_dependence(est, **params, sample_weight=sample_weight) + assert_allclose(pdp_sw_none["average"], pdp_sw_unit["average"]) + sample_weight = 2 * np.ones(len(y)) + pdp_sw_doubling = partial_dependence(est, **params, sample_weight=sample_weight) + assert_allclose(pdp_sw_none["average"], pdp_sw_doubling["average"]) + + +def test_partial_dependence_sample_weight_size_error(): + """Check that we raise an error when the size of `sample_weight` is not + consistent with `X` and `y`. + """ + est = LogisticRegression() + (X, y), n_targets = binary_classification_data + sample_weight = np.ones_like(y) + est.fit(X, y) + + with pytest.raises(ValueError, match="sample_weight.shape =="): + partial_dependence( + est, X, features=[0], sample_weight=sample_weight[1:], grid_resolution=10 + ) + + +def test_partial_dependence_sample_weight_with_recursion(): + """Check that we raise an error when `sample_weight` is provided with + `"recursion"` method. + """ + est = RandomForestRegressor() + (X, y), n_targets = regression_data + sample_weight = np.ones_like(y) + est.fit(X, y, sample_weight=sample_weight) + + with pytest.raises(ValueError, match="'recursion' method can only be applied when"): + partial_dependence( + est, X, features=[0], method="recursion", sample_weight=sample_weight + ) + + +# TODO(1.5): Remove when bunch values is deprecated in 1.5 +def test_partial_dependence_bunch_values_deprecated(): + """Test that deprecation warning is raised when values is accessed.""" + + est = LogisticRegression() + (X, y), _ = binary_classification_data + est.fit(X, y) + + pdp_avg = partial_dependence(est, X=X, features=[1, 2], kind="average") + + msg = ( + "Key: 'values', is deprecated in 1.3 and will be " + "removed in 1.5. 
Please use 'grid_values' instead" + ) + + with warnings.catch_warnings(): + # Does not raise warnings with "grid_values" + warnings.simplefilter("error", FutureWarning) + grid_values = pdp_avg["grid_values"] + + with pytest.warns(FutureWarning, match=msg): + # Warns for "values" + values = pdp_avg["values"] + + # "values" and "grid_values" are the same object + assert values is grid_values + + +def test_mixed_type_categorical(): + """Check that we raise a proper error when a column has mixed types and + the sorting of `np.unique` will fail.""" + X = np.array(["A", "B", "C", np.nan], dtype=object).reshape(-1, 1) + y = np.array([0, 1, 0, 1]) + + from sklearn.preprocessing import OrdinalEncoder + + clf = make_pipeline( + OrdinalEncoder(encoded_missing_value=-1), + LogisticRegression(), + ).fit(X, y) + with pytest.raises(ValueError, match="The column #0 contains mixed data types"): + partial_dependence(clf, X, features=[0]) diff --git a/venv/lib/python3.10/site-packages/sklearn/inspection/tests/test_pd_utils.py b/venv/lib/python3.10/site-packages/sklearn/inspection/tests/test_pd_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..5dea3834a77a70891a4efab25a560d09a49a13e1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/inspection/tests/test_pd_utils.py @@ -0,0 +1,47 @@ +import numpy as np +import pytest + +from sklearn.inspection._pd_utils import _check_feature_names, _get_feature_index +from sklearn.utils._testing import _convert_container + + +@pytest.mark.parametrize( + "feature_names, array_type, expected_feature_names", + [ + (None, "array", ["x0", "x1", "x2"]), + (None, "dataframe", ["a", "b", "c"]), + (np.array(["a", "b", "c"]), "array", ["a", "b", "c"]), + ], +) +def test_check_feature_names(feature_names, array_type, expected_feature_names): + X = np.random.randn(10, 3) + column_names = ["a", "b", "c"] + X = _convert_container(X, constructor_name=array_type, columns_name=column_names) + feature_names_validated = _check_feature_names(X, feature_names) + assert feature_names_validated == expected_feature_names + + +def test_check_feature_names_error(): + X = np.random.randn(10, 3) + feature_names = ["a", "b", "c", "a"] + msg = "feature_names should not contain duplicates." 
+ with pytest.raises(ValueError, match=msg): + _check_feature_names(X, feature_names) + + +@pytest.mark.parametrize("fx, idx", [(0, 0), (1, 1), ("a", 0), ("b", 1), ("c", 2)]) +def test_get_feature_index(fx, idx): + feature_names = ["a", "b", "c"] + assert _get_feature_index(fx, feature_names) == idx + + +@pytest.mark.parametrize( + "fx, feature_names, err_msg", + [ + ("a", None, "Cannot plot partial dependence for feature 'a'"), + ("d", ["a", "b", "c"], "Feature 'd' not in feature_names"), + ], +) +def test_get_feature_names_error(fx, feature_names, err_msg): + with pytest.raises(ValueError, match=err_msg): + _get_feature_index(fx, feature_names) diff --git a/venv/lib/python3.10/site-packages/sklearn/inspection/tests/test_permutation_importance.py b/venv/lib/python3.10/site-packages/sklearn/inspection/tests/test_permutation_importance.py new file mode 100644 index 0000000000000000000000000000000000000000..2869e84c78bf872647eb786c05a93ce190bc5689 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/inspection/tests/test_permutation_importance.py @@ -0,0 +1,542 @@ +import numpy as np +import pytest +from numpy.testing import assert_allclose + +from sklearn.compose import ColumnTransformer +from sklearn.datasets import ( + load_diabetes, + load_iris, + make_classification, + make_regression, +) +from sklearn.dummy import DummyClassifier +from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor +from sklearn.impute import SimpleImputer +from sklearn.inspection import permutation_importance +from sklearn.linear_model import LinearRegression, LogisticRegression +from sklearn.metrics import ( + get_scorer, + mean_squared_error, + r2_score, +) +from sklearn.model_selection import train_test_split +from sklearn.pipeline import make_pipeline +from sklearn.preprocessing import KBinsDiscretizer, OneHotEncoder, StandardScaler, scale +from sklearn.utils import parallel_backend +from sklearn.utils._testing import _convert_container + + +@pytest.mark.parametrize("n_jobs", [1, 2]) +@pytest.mark.parametrize("max_samples", [0.5, 1.0]) +@pytest.mark.parametrize("sample_weight", [None, "ones"]) +def test_permutation_importance_correlated_feature_regression( + n_jobs, max_samples, sample_weight +): + # Make sure that feature highly correlated to the target have a higher + # importance + rng = np.random.RandomState(42) + n_repeats = 5 + + X, y = load_diabetes(return_X_y=True) + y_with_little_noise = (y + rng.normal(scale=0.001, size=y.shape[0])).reshape(-1, 1) + + X = np.hstack([X, y_with_little_noise]) + + weights = np.ones_like(y) if sample_weight == "ones" else sample_weight + clf = RandomForestRegressor(n_estimators=10, random_state=42) + clf.fit(X, y) + + result = permutation_importance( + clf, + X, + y, + sample_weight=weights, + n_repeats=n_repeats, + random_state=rng, + n_jobs=n_jobs, + max_samples=max_samples, + ) + + assert result.importances.shape == (X.shape[1], n_repeats) + + # the correlated feature with y was added as the last column and should + # have the highest importance + assert np.all(result.importances_mean[-1] > result.importances_mean[:-1]) + + +@pytest.mark.parametrize("n_jobs", [1, 2]) +@pytest.mark.parametrize("max_samples", [0.5, 1.0]) +def test_permutation_importance_correlated_feature_regression_pandas( + n_jobs, max_samples +): + pd = pytest.importorskip("pandas") + + # Make sure that feature highly correlated to the target have a higher + # importance + rng = np.random.RandomState(42) + n_repeats = 5 + + dataset = load_iris() + X, y = dataset.data, 
dataset.target + y_with_little_noise = (y + rng.normal(scale=0.001, size=y.shape[0])).reshape(-1, 1) + + # Adds feature correlated with y as the last column + X = pd.DataFrame(X, columns=dataset.feature_names) + X["correlated_feature"] = y_with_little_noise + + clf = RandomForestClassifier(n_estimators=10, random_state=42) + clf.fit(X, y) + + result = permutation_importance( + clf, + X, + y, + n_repeats=n_repeats, + random_state=rng, + n_jobs=n_jobs, + max_samples=max_samples, + ) + + assert result.importances.shape == (X.shape[1], n_repeats) + + # the correlated feature with y was added as the last column and should + # have the highest importance + assert np.all(result.importances_mean[-1] > result.importances_mean[:-1]) + + +@pytest.mark.parametrize("n_jobs", [1, 2]) +@pytest.mark.parametrize("max_samples", [0.5, 1.0]) +def test_robustness_to_high_cardinality_noisy_feature(n_jobs, max_samples, seed=42): + # Permutation variable importance should not be affected by the high + # cardinality bias of traditional feature importances, especially when + # computed on a held-out test set: + rng = np.random.RandomState(seed) + n_repeats = 5 + n_samples = 1000 + n_classes = 5 + n_informative_features = 2 + n_noise_features = 1 + n_features = n_informative_features + n_noise_features + + # Generate a multiclass classification dataset and a set of informative + # binary features that can be used to predict some classes of y exactly + # while leaving some classes unexplained to make the problem harder. + classes = np.arange(n_classes) + y = rng.choice(classes, size=n_samples) + X = np.hstack([(y == c).reshape(-1, 1) for c in classes[:n_informative_features]]) + X = X.astype(np.float32) + + # Not all target classes are explained by the binary class indicator + # features: + assert n_informative_features < n_classes + + # Add 10 other noisy features with high cardinality (numerical) values + # that can be used to overfit the training data. + X = np.concatenate([X, rng.randn(n_samples, n_noise_features)], axis=1) + assert X.shape == (n_samples, n_features) + + # Split the dataset to be able to evaluate on a held-out test set. The + # Test size should be large enough for importance measurements to be + # stable: + X_train, X_test, y_train, y_test = train_test_split( + X, y, test_size=0.5, random_state=rng + ) + clf = RandomForestClassifier(n_estimators=5, random_state=rng) + clf.fit(X_train, y_train) + + # Variable importances computed by impurity decrease on the tree node + # splits often use the noisy features in splits. This can give misleading + # impression that high cardinality noisy variables are the most important: + tree_importances = clf.feature_importances_ + informative_tree_importances = tree_importances[:n_informative_features] + noisy_tree_importances = tree_importances[n_informative_features:] + assert informative_tree_importances.max() < noisy_tree_importances.min() + + # Let's check that permutation-based feature importances do not have this + # problem. 
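Editorial aside, not part of the vendored test file: the definition this check relies on is simply "importance = drop in score after shuffling one column", which is why an uninformative high-cardinality column cannot look important on held-out data. A hand-rolled sketch of that definition, using an illustrative LinearRegression and make_regression dataset rather than the fixtures above:

import numpy as np
from sklearn.datasets import make_regression
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score

rng = np.random.RandomState(0)
X, y = make_regression(n_samples=300, n_features=3, n_informative=2, random_state=0)
model = LinearRegression().fit(X, y)
baseline = r2_score(y, model.predict(X))

# Importance of column j = baseline score minus the score after shuffling column j.
drops = []
for j in range(X.shape[1]):
    X_perm = X.copy()
    X_perm[:, j] = rng.permutation(X_perm[:, j])
    drops.append(baseline - r2_score(y, model.predict(X_perm)))

print(np.round(drops, 3))  # the informative columns show large drops

permutation_importance repeats the shuffle n_repeats times and averages, which is what importances_mean reports; the vendored test resumes below using that function directly.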
+ r = permutation_importance( + clf, + X_test, + y_test, + n_repeats=n_repeats, + random_state=rng, + n_jobs=n_jobs, + max_samples=max_samples, + ) + + assert r.importances.shape == (X.shape[1], n_repeats) + + # Split the importances between informative and noisy features + informative_importances = r.importances_mean[:n_informative_features] + noisy_importances = r.importances_mean[n_informative_features:] + + # Because we do not have a binary variable explaining each target classes, + # the RF model will have to use the random variable to make some + # (overfitting) splits (as max_depth is not set). Therefore the noisy + # variables will be non-zero but with small values oscillating around + # zero: + assert max(np.abs(noisy_importances)) > 1e-7 + assert noisy_importances.max() < 0.05 + + # The binary features correlated with y should have a higher importance + # than the high cardinality noisy features. + # The maximum test accuracy is 2 / 5 == 0.4, each informative feature + # contributing approximately a bit more than 0.2 of accuracy. + assert informative_importances.min() > 0.15 + + +def test_permutation_importance_mixed_types(): + rng = np.random.RandomState(42) + n_repeats = 4 + + # Last column is correlated with y + X = np.array([[1.0, 2.0, 3.0, np.nan], [2, 1, 2, 1]]).T + y = np.array([0, 1, 0, 1]) + + clf = make_pipeline(SimpleImputer(), LogisticRegression(solver="lbfgs")) + clf.fit(X, y) + result = permutation_importance(clf, X, y, n_repeats=n_repeats, random_state=rng) + + assert result.importances.shape == (X.shape[1], n_repeats) + + # the correlated feature with y is the last column and should + # have the highest importance + assert np.all(result.importances_mean[-1] > result.importances_mean[:-1]) + + # use another random state + rng = np.random.RandomState(0) + result2 = permutation_importance(clf, X, y, n_repeats=n_repeats, random_state=rng) + assert result2.importances.shape == (X.shape[1], n_repeats) + + assert not np.allclose(result.importances, result2.importances) + + # the correlated feature with y is the last column and should + # have the highest importance + assert np.all(result2.importances_mean[-1] > result2.importances_mean[:-1]) + + +def test_permutation_importance_mixed_types_pandas(): + pd = pytest.importorskip("pandas") + rng = np.random.RandomState(42) + n_repeats = 5 + + # Last column is correlated with y + X = pd.DataFrame({"col1": [1.0, 2.0, 3.0, np.nan], "col2": ["a", "b", "a", "b"]}) + y = np.array([0, 1, 0, 1]) + + num_preprocess = make_pipeline(SimpleImputer(), StandardScaler()) + preprocess = ColumnTransformer( + [("num", num_preprocess, ["col1"]), ("cat", OneHotEncoder(), ["col2"])] + ) + clf = make_pipeline(preprocess, LogisticRegression(solver="lbfgs")) + clf.fit(X, y) + + result = permutation_importance(clf, X, y, n_repeats=n_repeats, random_state=rng) + + assert result.importances.shape == (X.shape[1], n_repeats) + # the correlated feature with y is the last column and should + # have the highest importance + assert np.all(result.importances_mean[-1] > result.importances_mean[:-1]) + + +def test_permutation_importance_linear_regresssion(): + X, y = make_regression(n_samples=500, n_features=10, random_state=0) + + X = scale(X) + y = scale(y) + + lr = LinearRegression().fit(X, y) + + # this relationship can be computed in closed form + expected_importances = 2 * lr.coef_**2 + results = permutation_importance( + lr, X, y, n_repeats=50, scoring="neg_mean_squared_error" + ) + assert_allclose( + expected_importances, results.importances_mean, 
rtol=1e-1, atol=1e-6 + ) + + +@pytest.mark.parametrize("max_samples", [500, 1.0]) +def test_permutation_importance_equivalence_sequential_parallel(max_samples): + # regression test to make sure that sequential and parallel calls will + # output the same results. + # Also tests that max_samples equal to the number of samples is equivalent to 1.0 + X, y = make_regression(n_samples=500, n_features=10, random_state=0) + lr = LinearRegression().fit(X, y) + + importance_sequential = permutation_importance( + lr, X, y, n_repeats=5, random_state=0, n_jobs=1, max_samples=max_samples + ) + + # First check that the problem is structured enough and that the model is + # complex enough to not yield trivial, constant importances: + imp_min = importance_sequential["importances"].min() + imp_max = importance_sequential["importances"].max() + assert imp_max - imp_min > 0.3 + + # The actual check that parallelism does not impact the results + # either with shared memory (threading) or with isolated memory + # via process-based parallelism using the default backend + # ('loky' or 'multiprocessing') depending on the joblib version: + + # process-based parallelism (by default): + importance_processes = permutation_importance( + lr, X, y, n_repeats=5, random_state=0, n_jobs=2 + ) + assert_allclose( + importance_processes["importances"], importance_sequential["importances"] + ) + + # thread-based parallelism: + with parallel_backend("threading"): + importance_threading = permutation_importance( + lr, X, y, n_repeats=5, random_state=0, n_jobs=2 + ) + assert_allclose( + importance_threading["importances"], importance_sequential["importances"] + ) + + +@pytest.mark.parametrize("n_jobs", [None, 1, 2]) +@pytest.mark.parametrize("max_samples", [0.5, 1.0]) +def test_permutation_importance_equivalence_array_dataframe(n_jobs, max_samples): + # This test checks that the column shuffling logic has the same behavior on + # both a dataframe and a simple numpy array. + pd = pytest.importorskip("pandas") + + # regression test to make sure that sequential and parallel calls will + # output the same results.
+ X, y = make_regression(n_samples=100, n_features=5, random_state=0) + X_df = pd.DataFrame(X) + + # Add a categorical feature that is statistically linked to y: + binner = KBinsDiscretizer(n_bins=3, encode="ordinal") + cat_column = binner.fit_transform(y.reshape(-1, 1)) + + # Concatenate the extra column to the numpy array: integers will be + # cast to float values + X = np.hstack([X, cat_column]) + assert X.dtype.kind == "f" + + # Insert extra column as a non-numpy-native dtype (while keeping backward + # compat for old pandas versions): + if hasattr(pd, "Categorical"): + cat_column = pd.Categorical(cat_column.ravel()) + else: + cat_column = cat_column.ravel() + new_col_idx = len(X_df.columns) + X_df[new_col_idx] = cat_column + assert X_df[new_col_idx].dtype == cat_column.dtype + + # Stitch an arbitrary index to the dataframe: + X_df.index = np.arange(len(X_df)).astype(str) + + rf = RandomForestRegressor(n_estimators=5, max_depth=3, random_state=0) + rf.fit(X, y) + + n_repeats = 3 + importance_array = permutation_importance( + rf, + X, + y, + n_repeats=n_repeats, + random_state=0, + n_jobs=n_jobs, + max_samples=max_samples, + ) + + # First check that the problem is structured enough and that the model is + # complex enough to not yield trivial, constant importances: + imp_min = importance_array["importances"].min() + imp_max = importance_array["importances"].max() + assert imp_max - imp_min > 0.3 + + # Now check that importances computed on the dataframe match the values + # of those computed on the array with the same data. + importance_dataframe = permutation_importance( + rf, + X_df, + y, + n_repeats=n_repeats, + random_state=0, + n_jobs=n_jobs, + max_samples=max_samples, + ) + assert_allclose( + importance_array["importances"], importance_dataframe["importances"] + ) + + +@pytest.mark.parametrize("input_type", ["array", "dataframe"]) +def test_permutation_importance_large_memmaped_data(input_type): + # Smoke, non-regression test for: + # https://github.com/scikit-learn/scikit-learn/issues/15810 + n_samples, n_features = int(5e4), 4 + X, y = make_classification( + n_samples=n_samples, n_features=n_features, random_state=0 + ) + assert X.nbytes > 1e6 # trigger joblib memmapping + + X = _convert_container(X, input_type) + clf = DummyClassifier(strategy="prior").fit(X, y) + + # Actual smoke test: should not raise any error: + n_repeats = 5 + r = permutation_importance(clf, X, y, n_repeats=n_repeats, n_jobs=2) + + # Auxiliary check: DummyClassifier is feature independent: + # permuting features should not change the predictions + expected_importances = np.zeros((n_features, n_repeats)) + assert_allclose(expected_importances, r.importances) + + +def test_permutation_importance_sample_weight(): + # Creating data with 2 features and 1000 samples, where the target + # variable is a linear combination of the two features, such that + # in half of the samples the impact of feature 1 is twice the impact of + # feature 2, and vice versa on the other half of the samples.
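Editorial aside, not part of the vendored test file: the property the test below depends on is that sample_weight is only forwarded to the scorer, so a vector of ones must reproduce the unweighted importances. A minimal sketch under that assumption, with an illustrative estimator and dataset:

import numpy as np
from sklearn.datasets import make_regression
from sklearn.inspection import permutation_importance
from sklearn.linear_model import LinearRegression

X, y = make_regression(n_samples=200, n_features=2, random_state=0)
lr = LinearRegression().fit(X, y)

unweighted = permutation_importance(
    lr, X, y, scoring="neg_mean_absolute_error", n_repeats=10, random_state=0
)
unit_weights = permutation_importance(
    lr,
    X,
    y,
    scoring="neg_mean_absolute_error",
    n_repeats=10,
    random_state=0,
    sample_weight=np.ones(len(y)),
)

# Same random_state gives the same permutations, so the importances match.
assert np.allclose(unweighted.importances, unit_weights.importances)

The vendored test goes further, checking that strongly unequal weights shift the ratio of the two importances toward the weighted half of the data.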
+ rng = np.random.RandomState(1) + n_samples = 1000 + n_features = 2 + n_half_samples = n_samples // 2 + x = rng.normal(0.0, 0.001, (n_samples, n_features)) + y = np.zeros(n_samples) + y[:n_half_samples] = 2 * x[:n_half_samples, 0] + x[:n_half_samples, 1] + y[n_half_samples:] = x[n_half_samples:, 0] + 2 * x[n_half_samples:, 1] + + # Fitting linear regression with perfect prediction + lr = LinearRegression(fit_intercept=False) + lr.fit(x, y) + + # When all samples are weighted with the same weights, the ratio of + # the two features importance should equal to 1 on expectation (when using + # mean absolutes error as the loss function). + pi = permutation_importance( + lr, x, y, random_state=1, scoring="neg_mean_absolute_error", n_repeats=200 + ) + x1_x2_imp_ratio_w_none = pi.importances_mean[0] / pi.importances_mean[1] + assert x1_x2_imp_ratio_w_none == pytest.approx(1, 0.01) + + # When passing a vector of ones as the sample_weight, results should be + # the same as in the case that sample_weight=None. + w = np.ones(n_samples) + pi = permutation_importance( + lr, + x, + y, + random_state=1, + scoring="neg_mean_absolute_error", + n_repeats=200, + sample_weight=w, + ) + x1_x2_imp_ratio_w_ones = pi.importances_mean[0] / pi.importances_mean[1] + assert x1_x2_imp_ratio_w_ones == pytest.approx(x1_x2_imp_ratio_w_none, 0.01) + + # When the ratio between the weights of the first half of the samples and + # the second half of the samples approaches to infinity, the ratio of + # the two features importance should equal to 2 on expectation (when using + # mean absolutes error as the loss function). + w = np.hstack( + [np.repeat(10.0**10, n_half_samples), np.repeat(1.0, n_half_samples)] + ) + lr.fit(x, y, w) + pi = permutation_importance( + lr, + x, + y, + random_state=1, + scoring="neg_mean_absolute_error", + n_repeats=200, + sample_weight=w, + ) + x1_x2_imp_ratio_w = pi.importances_mean[0] / pi.importances_mean[1] + assert x1_x2_imp_ratio_w / x1_x2_imp_ratio_w_none == pytest.approx(2, 0.01) + + +def test_permutation_importance_no_weights_scoring_function(): + # Creating a scorer function that does not takes sample_weight + def my_scorer(estimator, X, y): + return 1 + + # Creating some data and estimator for the permutation test + x = np.array([[1, 2], [3, 4]]) + y = np.array([1, 2]) + w = np.array([1, 1]) + lr = LinearRegression() + lr.fit(x, y) + + # test that permutation_importance does not return error when + # sample_weight is None + try: + permutation_importance(lr, x, y, random_state=1, scoring=my_scorer, n_repeats=1) + except TypeError: + pytest.fail( + "permutation_test raised an error when using a scorer " + "function that does not accept sample_weight even though " + "sample_weight was None" + ) + + # test that permutation_importance raise exception when sample_weight is + # not None + with pytest.raises(TypeError): + permutation_importance( + lr, x, y, random_state=1, scoring=my_scorer, n_repeats=1, sample_weight=w + ) + + +@pytest.mark.parametrize( + "list_single_scorer, multi_scorer", + [ + (["r2", "neg_mean_squared_error"], ["r2", "neg_mean_squared_error"]), + ( + ["r2", "neg_mean_squared_error"], + { + "r2": get_scorer("r2"), + "neg_mean_squared_error": get_scorer("neg_mean_squared_error"), + }, + ), + ( + ["r2", "neg_mean_squared_error"], + lambda estimator, X, y: { + "r2": r2_score(y, estimator.predict(X)), + "neg_mean_squared_error": -mean_squared_error(y, estimator.predict(X)), + }, + ), + ], +) +def test_permutation_importance_multi_metric(list_single_scorer, multi_scorer): + # Test 
permutation importance when scoring contains multiple scorers + + # Creating some data and estimator for the permutation test + x, y = make_regression(n_samples=500, n_features=10, random_state=0) + lr = LinearRegression().fit(x, y) + + multi_importance = permutation_importance( + lr, x, y, random_state=1, scoring=multi_scorer, n_repeats=2 + ) + assert set(multi_importance.keys()) == set(list_single_scorer) + + for scorer in list_single_scorer: + multi_result = multi_importance[scorer] + single_result = permutation_importance( + lr, x, y, random_state=1, scoring=scorer, n_repeats=2 + ) + + assert_allclose(multi_result.importances, single_result.importances) + + +def test_permutation_importance_max_samples_error(): + """Check that a proper error message is raised when `max_samples` is not + set to a valid input value. + """ + X = np.array([(1.0, 2.0, 3.0, 4.0)]).T + y = np.array([0, 1, 0, 1]) + + clf = LogisticRegression() + clf.fit(X, y) + + err_msg = r"max_samples must be <= n_samples" + + with pytest.raises(ValueError, match=err_msg): + permutation_importance(clf, X, y, max_samples=5) diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/__init__.py b/venv/lib/python3.10/site-packages/sklearn/metrics/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..713c5fe651dbbe94237d388c07c1ed81533f0a18 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/metrics/__init__.py @@ -0,0 +1,180 @@ +""" +The :mod:`sklearn.metrics` module includes score functions, performance metrics +and pairwise metrics and distance computations. +""" + + +from . import cluster +from ._classification import ( + accuracy_score, + balanced_accuracy_score, + brier_score_loss, + class_likelihood_ratios, + classification_report, + cohen_kappa_score, + confusion_matrix, + f1_score, + fbeta_score, + hamming_loss, + hinge_loss, + jaccard_score, + log_loss, + matthews_corrcoef, + multilabel_confusion_matrix, + precision_recall_fscore_support, + precision_score, + recall_score, + zero_one_loss, +) +from ._dist_metrics import DistanceMetric +from ._plot.confusion_matrix import ConfusionMatrixDisplay +from ._plot.det_curve import DetCurveDisplay +from ._plot.precision_recall_curve import PrecisionRecallDisplay +from ._plot.regression import PredictionErrorDisplay +from ._plot.roc_curve import RocCurveDisplay +from ._ranking import ( + auc, + average_precision_score, + coverage_error, + dcg_score, + det_curve, + label_ranking_average_precision_score, + label_ranking_loss, + ndcg_score, + precision_recall_curve, + roc_auc_score, + roc_curve, + top_k_accuracy_score, +) +from ._regression import ( + d2_absolute_error_score, + d2_pinball_score, + d2_tweedie_score, + explained_variance_score, + max_error, + mean_absolute_error, + mean_absolute_percentage_error, + mean_gamma_deviance, + mean_pinball_loss, + mean_poisson_deviance, + mean_squared_error, + mean_squared_log_error, + mean_tweedie_deviance, + median_absolute_error, + r2_score, + root_mean_squared_error, + root_mean_squared_log_error, +) +from ._scorer import check_scoring, get_scorer, get_scorer_names, make_scorer +from .cluster import ( + adjusted_mutual_info_score, + adjusted_rand_score, + calinski_harabasz_score, + completeness_score, + consensus_score, + davies_bouldin_score, + fowlkes_mallows_score, + homogeneity_completeness_v_measure, + homogeneity_score, + mutual_info_score, + normalized_mutual_info_score, + pair_confusion_matrix, + rand_score, + silhouette_samples, + silhouette_score, + v_measure_score, +) +from .pairwise 
import ( + euclidean_distances, + nan_euclidean_distances, + pairwise_distances, + pairwise_distances_argmin, + pairwise_distances_argmin_min, + pairwise_distances_chunked, + pairwise_kernels, +) + +__all__ = [ + "accuracy_score", + "adjusted_mutual_info_score", + "adjusted_rand_score", + "auc", + "average_precision_score", + "balanced_accuracy_score", + "calinski_harabasz_score", + "check_scoring", + "class_likelihood_ratios", + "classification_report", + "cluster", + "cohen_kappa_score", + "completeness_score", + "ConfusionMatrixDisplay", + "confusion_matrix", + "consensus_score", + "coverage_error", + "d2_tweedie_score", + "d2_absolute_error_score", + "d2_pinball_score", + "dcg_score", + "davies_bouldin_score", + "DetCurveDisplay", + "det_curve", + "DistanceMetric", + "euclidean_distances", + "explained_variance_score", + "f1_score", + "fbeta_score", + "fowlkes_mallows_score", + "get_scorer", + "hamming_loss", + "hinge_loss", + "homogeneity_completeness_v_measure", + "homogeneity_score", + "jaccard_score", + "label_ranking_average_precision_score", + "label_ranking_loss", + "log_loss", + "make_scorer", + "nan_euclidean_distances", + "matthews_corrcoef", + "max_error", + "mean_absolute_error", + "mean_squared_error", + "mean_squared_log_error", + "mean_pinball_loss", + "mean_poisson_deviance", + "mean_gamma_deviance", + "mean_tweedie_deviance", + "median_absolute_error", + "mean_absolute_percentage_error", + "multilabel_confusion_matrix", + "mutual_info_score", + "ndcg_score", + "normalized_mutual_info_score", + "pair_confusion_matrix", + "pairwise_distances", + "pairwise_distances_argmin", + "pairwise_distances_argmin_min", + "pairwise_distances_chunked", + "pairwise_kernels", + "PrecisionRecallDisplay", + "precision_recall_curve", + "precision_recall_fscore_support", + "precision_score", + "PredictionErrorDisplay", + "r2_score", + "rand_score", + "recall_score", + "RocCurveDisplay", + "roc_auc_score", + "roc_curve", + "root_mean_squared_log_error", + "root_mean_squared_error", + "get_scorer_names", + "silhouette_samples", + "silhouette_score", + "top_k_accuracy_score", + "v_measure_score", + "zero_one_loss", + "brier_score_loss", +] diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/_base.py b/venv/lib/python3.10/site-packages/sklearn/metrics/_base.py new file mode 100644 index 0000000000000000000000000000000000000000..53ff14b039e0cd781668747465f99a9b05a1ca3b --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/metrics/_base.py @@ -0,0 +1,199 @@ +""" +Common code for all metrics. + +""" +# Authors: Alexandre Gramfort +# Mathieu Blondel +# Olivier Grisel +# Arnaud Joly +# Jochen Wersdorfer +# Lars Buitinck +# Joel Nothman +# Noel Dawe +# License: BSD 3 clause + +from itertools import combinations + +import numpy as np + +from ..utils import check_array, check_consistent_length +from ..utils.multiclass import type_of_target + + +def _average_binary_score(binary_metric, y_true, y_score, average, sample_weight=None): + """Average a binary metric for multilabel classification. + + Parameters + ---------- + y_true : array, shape = [n_samples] or [n_samples, n_classes] + True binary labels in binary label indicators. + + y_score : array, shape = [n_samples] or [n_samples, n_classes] + Target scores, can either be probability estimates of the positive + class, confidence values, or binary decisions. + + average : {None, 'micro', 'macro', 'samples', 'weighted'}, default='macro' + If ``None``, the scores for each class are returned. 
Otherwise, + this determines the type of averaging performed on the data: + + ``'micro'``: + Calculate metrics globally by considering each element of the label + indicator matrix as a label. + ``'macro'``: + Calculate metrics for each label, and find their unweighted + mean. This does not take label imbalance into account. + ``'weighted'``: + Calculate metrics for each label, and find their average, weighted + by support (the number of true instances for each label). + ``'samples'``: + Calculate metrics for each instance, and find their average. + + Will be ignored when ``y_true`` is binary. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. + + binary_metric : callable, returns shape [n_classes] + The binary metric function to use. + + Returns + ------- + score : float or array of shape [n_classes] + If not ``None``, average the score, else return the score for each + classes. + + """ + average_options = (None, "micro", "macro", "weighted", "samples") + if average not in average_options: + raise ValueError("average has to be one of {0}".format(average_options)) + + y_type = type_of_target(y_true) + if y_type not in ("binary", "multilabel-indicator"): + raise ValueError("{0} format is not supported".format(y_type)) + + if y_type == "binary": + return binary_metric(y_true, y_score, sample_weight=sample_weight) + + check_consistent_length(y_true, y_score, sample_weight) + y_true = check_array(y_true) + y_score = check_array(y_score) + + not_average_axis = 1 + score_weight = sample_weight + average_weight = None + + if average == "micro": + if score_weight is not None: + score_weight = np.repeat(score_weight, y_true.shape[1]) + y_true = y_true.ravel() + y_score = y_score.ravel() + + elif average == "weighted": + if score_weight is not None: + average_weight = np.sum( + np.multiply(y_true, np.reshape(score_weight, (-1, 1))), axis=0 + ) + else: + average_weight = np.sum(y_true, axis=0) + if np.isclose(average_weight.sum(), 0.0): + return 0 + + elif average == "samples": + # swap average_weight <-> score_weight + average_weight = score_weight + score_weight = None + not_average_axis = 0 + + if y_true.ndim == 1: + y_true = y_true.reshape((-1, 1)) + + if y_score.ndim == 1: + y_score = y_score.reshape((-1, 1)) + + n_classes = y_score.shape[not_average_axis] + score = np.zeros((n_classes,)) + for c in range(n_classes): + y_true_c = y_true.take([c], axis=not_average_axis).ravel() + y_score_c = y_score.take([c], axis=not_average_axis).ravel() + score[c] = binary_metric(y_true_c, y_score_c, sample_weight=score_weight) + + # Average the results + if average is not None: + if average_weight is not None: + # Scores with 0 weights are forced to be 0, preventing the average + # score from being affected by 0-weighted NaN elements. + average_weight = np.asarray(average_weight) + score[average_weight == 0] = 0 + return np.average(score, weights=average_weight) + else: + return score + + +def _average_multiclass_ovo_score(binary_metric, y_true, y_score, average="macro"): + """Average one-versus-one scores for multiclass classification. + + Uses the binary metric for one-vs-one multiclass classification, + where the score is computed according to the Hand & Till (2001) algorithm. + + Parameters + ---------- + binary_metric : callable + The binary metric function to use that accepts the following as input: + y_true_target : array, shape = [n_samples_target] + Some sub-array of y_true for a pair of classes designated + positive and negative in the one-vs-one scheme. 
+ y_score_target : array, shape = [n_samples_target] + Scores corresponding to the probability estimates + of a sample belonging to the designated positive class label + + y_true : array-like of shape (n_samples,) + True multiclass labels. + + y_score : array-like of shape (n_samples, n_classes) + Target scores corresponding to probability estimates of a sample + belonging to a particular class. + + average : {'macro', 'weighted'}, default='macro' + Determines the type of averaging performed on the pairwise binary + metric scores: + ``'macro'``: + Calculate metrics for each label, and find their unweighted + mean. This does not take label imbalance into account. Classes + are assumed to be uniformly distributed. + ``'weighted'``: + Calculate metrics for each label, taking into account the + prevalence of the classes. + + Returns + ------- + score : float + Average of the pairwise binary metric scores. + """ + check_consistent_length(y_true, y_score) + + y_true_unique = np.unique(y_true) + n_classes = y_true_unique.shape[0] + n_pairs = n_classes * (n_classes - 1) // 2 + pair_scores = np.empty(n_pairs) + + is_weighted = average == "weighted" + prevalence = np.empty(n_pairs) if is_weighted else None + + # Compute scores treating a as positive class and b as negative class, + # then b as positive class and a as negative class + for ix, (a, b) in enumerate(combinations(y_true_unique, 2)): + a_mask = y_true == a + b_mask = y_true == b + ab_mask = np.logical_or(a_mask, b_mask) + + if is_weighted: + prevalence[ix] = np.average(ab_mask) + + a_true = a_mask[ab_mask] + b_true = b_mask[ab_mask] + + a_true_score = binary_metric(a_true, y_score[ab_mask, a]) + b_true_score = binary_metric(b_true, y_score[ab_mask, b]) + pair_scores[ix] = (a_true_score + b_true_score) / 2 + + return np.average(pair_scores, weights=prevalence) diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/_classification.py b/venv/lib/python3.10/site-packages/sklearn/metrics/_classification.py new file mode 100644 index 0000000000000000000000000000000000000000..ff8434b39c8d7f8e94c354081bbec8eca0797b66 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/metrics/_classification.py @@ -0,0 +1,3254 @@ +"""Metrics to assess performance on classification task given class prediction. + +Functions named as ``*_score`` return a scalar value to maximize: the higher +the better. + +Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize: +the lower the better. 
+""" + +# Authors: Alexandre Gramfort +# Mathieu Blondel +# Olivier Grisel +# Arnaud Joly +# Jochen Wersdorfer +# Lars Buitinck +# Joel Nothman +# Noel Dawe +# Jatin Shah +# Saurabh Jha +# Bernardo Stein +# Shangwu Yao +# Michal Karbownik +# License: BSD 3 clause + + +import warnings +from numbers import Integral, Real + +import numpy as np +from scipy.sparse import coo_matrix, csr_matrix +from scipy.special import xlogy + +from ..exceptions import UndefinedMetricWarning +from ..preprocessing import LabelBinarizer, LabelEncoder +from ..utils import ( + assert_all_finite, + check_array, + check_consistent_length, + column_or_1d, +) +from ..utils._array_api import _union1d, _weighted_sum, get_namespace +from ..utils._param_validation import Interval, Options, StrOptions, validate_params +from ..utils.extmath import _nanaverage +from ..utils.multiclass import type_of_target, unique_labels +from ..utils.sparsefuncs import count_nonzero +from ..utils.validation import _check_pos_label_consistency, _num_samples + + +def _check_zero_division(zero_division): + if isinstance(zero_division, str) and zero_division == "warn": + return np.float64(0.0) + elif isinstance(zero_division, (int, float)) and zero_division in [0, 1]: + return np.float64(zero_division) + else: # np.isnan(zero_division) + return np.nan + + +def _check_targets(y_true, y_pred): + """Check that y_true and y_pred belong to the same classification task. + + This converts multiclass or binary types to a common shape, and raises a + ValueError for a mix of multilabel and multiclass targets, a mix of + multilabel formats, for the presence of continuous-valued or multioutput + targets, or for targets of different lengths. + + Column vectors are squeezed to 1d, while multilabel formats are returned + as CSR sparse label indicators. + + Parameters + ---------- + y_true : array-like + + y_pred : array-like + + Returns + ------- + type_true : one of {'multilabel-indicator', 'multiclass', 'binary'} + The type of the true target data, as output by + ``utils.multiclass.type_of_target``. + + y_true : array or indicator matrix + + y_pred : array or indicator matrix + """ + check_consistent_length(y_true, y_pred) + type_true = type_of_target(y_true, input_name="y_true") + type_pred = type_of_target(y_pred, input_name="y_pred") + + y_type = {type_true, type_pred} + if y_type == {"binary", "multiclass"}: + y_type = {"multiclass"} + + if len(y_type) > 1: + raise ValueError( + "Classification metrics can't handle a mix of {0} and {1} targets".format( + type_true, type_pred + ) + ) + + # We can't have more than one value on y_type => The set is no more needed + y_type = y_type.pop() + + # No metrics support "multiclass-multioutput" format + if y_type not in ["binary", "multiclass", "multilabel-indicator"]: + raise ValueError("{0} is not supported".format(y_type)) + + if y_type in ["binary", "multiclass"]: + xp, _ = get_namespace(y_true, y_pred) + y_true = column_or_1d(y_true) + y_pred = column_or_1d(y_pred) + if y_type == "binary": + try: + unique_values = _union1d(y_true, y_pred, xp) + except TypeError as e: + # We expect y_true and y_pred to be of the same data type. + # If `y_true` was provided to the classifier as strings, + # `y_pred` given by the classifier will also be encoded with + # strings. So we raise a meaningful error + raise TypeError( + "Labels in y_true and y_pred should be of the same type. " + f"Got y_true={xp.unique(y_true)} and " + f"y_pred={xp.unique(y_pred)}. 
Make sure that the " + "predictions provided by the classifier coincides with " + "the true labels." + ) from e + if unique_values.shape[0] > 2: + y_type = "multiclass" + + if y_type.startswith("multilabel"): + y_true = csr_matrix(y_true) + y_pred = csr_matrix(y_pred) + y_type = "multilabel-indicator" + + return y_type, y_true, y_pred + + +@validate_params( + { + "y_true": ["array-like", "sparse matrix"], + "y_pred": ["array-like", "sparse matrix"], + "normalize": ["boolean"], + "sample_weight": ["array-like", None], + }, + prefer_skip_nested_validation=True, +) +def accuracy_score(y_true, y_pred, *, normalize=True, sample_weight=None): + """Accuracy classification score. + + In multilabel classification, this function computes subset accuracy: + the set of labels predicted for a sample must *exactly* match the + corresponding set of labels in y_true. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + y_true : 1d array-like, or label indicator array / sparse matrix + Ground truth (correct) labels. + + y_pred : 1d array-like, or label indicator array / sparse matrix + Predicted labels, as returned by a classifier. + + normalize : bool, default=True + If ``False``, return the number of correctly classified samples. + Otherwise, return the fraction of correctly classified samples. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. + + Returns + ------- + score : float + If ``normalize == True``, return the fraction of correctly + classified samples (float), else returns the number of correctly + classified samples (int). + + The best performance is 1 with ``normalize == True`` and the number + of samples with ``normalize == False``. + + See Also + -------- + balanced_accuracy_score : Compute the balanced accuracy to deal with + imbalanced datasets. + jaccard_score : Compute the Jaccard similarity coefficient score. + hamming_loss : Compute the average Hamming loss or Hamming distance between + two sets of samples. + zero_one_loss : Compute the Zero-one classification loss. By default, the + function will return the percentage of imperfectly predicted subsets. + + Notes + ----- + In binary classification, this function is equal to the `jaccard_score` + function. + + Examples + -------- + >>> from sklearn.metrics import accuracy_score + >>> y_pred = [0, 2, 1, 3] + >>> y_true = [0, 1, 2, 3] + >>> accuracy_score(y_true, y_pred) + 0.5 + >>> accuracy_score(y_true, y_pred, normalize=False) + 2.0 + + In the multilabel case with binary label indicators: + + >>> import numpy as np + >>> accuracy_score(np.array([[0, 1], [1, 1]]), np.ones((2, 2))) + 0.5 + """ + + # Compute accuracy for each possible representation + y_type, y_true, y_pred = _check_targets(y_true, y_pred) + check_consistent_length(y_true, y_pred, sample_weight) + if y_type.startswith("multilabel"): + differing_labels = count_nonzero(y_true - y_pred, axis=1) + score = differing_labels == 0 + else: + score = y_true == y_pred + + return _weighted_sum(score, sample_weight, normalize) + + +@validate_params( + { + "y_true": ["array-like"], + "y_pred": ["array-like"], + "labels": ["array-like", None], + "sample_weight": ["array-like", None], + "normalize": [StrOptions({"true", "pred", "all"}), None], + }, + prefer_skip_nested_validation=True, +) +def confusion_matrix( + y_true, y_pred, *, labels=None, sample_weight=None, normalize=None +): + """Compute confusion matrix to evaluate the accuracy of a classification. 
+ + By definition a confusion matrix :math:`C` is such that :math:`C_{i, j}` + is equal to the number of observations known to be in group :math:`i` and + predicted to be in group :math:`j`. + + Thus in binary classification, the count of true negatives is + :math:`C_{0,0}`, false negatives is :math:`C_{1,0}`, true positives is + :math:`C_{1,1}` and false positives is :math:`C_{0,1}`. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + y_true : array-like of shape (n_samples,) + Ground truth (correct) target values. + + y_pred : array-like of shape (n_samples,) + Estimated targets as returned by a classifier. + + labels : array-like of shape (n_classes), default=None + List of labels to index the matrix. This may be used to reorder + or select a subset of labels. + If ``None`` is given, those that appear at least once + in ``y_true`` or ``y_pred`` are used in sorted order. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. + + .. versionadded:: 0.18 + + normalize : {'true', 'pred', 'all'}, default=None + Normalizes confusion matrix over the true (rows), predicted (columns) + conditions or all the population. If None, confusion matrix will not be + normalized. + + Returns + ------- + C : ndarray of shape (n_classes, n_classes) + Confusion matrix whose i-th row and j-th + column entry indicates the number of + samples with true label being i-th class + and predicted label being j-th class. + + See Also + -------- + ConfusionMatrixDisplay.from_estimator : Plot the confusion matrix + given an estimator, the data, and the label. + ConfusionMatrixDisplay.from_predictions : Plot the confusion matrix + given the true and predicted labels. + ConfusionMatrixDisplay : Confusion Matrix visualization. + + References + ---------- + .. [1] `Wikipedia entry for the Confusion matrix + `_ + (Wikipedia and other references may use a different + convention for axes). + + Examples + -------- + >>> from sklearn.metrics import confusion_matrix + >>> y_true = [2, 0, 2, 2, 0, 1] + >>> y_pred = [0, 0, 2, 2, 0, 2] + >>> confusion_matrix(y_true, y_pred) + array([[2, 0, 0], + [0, 0, 1], + [1, 0, 2]]) + + >>> y_true = ["cat", "ant", "cat", "cat", "ant", "bird"] + >>> y_pred = ["ant", "ant", "cat", "cat", "ant", "cat"] + >>> confusion_matrix(y_true, y_pred, labels=["ant", "bird", "cat"]) + array([[2, 0, 0], + [0, 0, 1], + [1, 0, 2]]) + + In the binary case, we can extract true positives, etc. 
as follows: + + >>> tn, fp, fn, tp = confusion_matrix([0, 1, 0, 1], [1, 1, 1, 0]).ravel() + >>> (tn, fp, fn, tp) + (0, 2, 1, 1) + """ + y_type, y_true, y_pred = _check_targets(y_true, y_pred) + if y_type not in ("binary", "multiclass"): + raise ValueError("%s is not supported" % y_type) + + if labels is None: + labels = unique_labels(y_true, y_pred) + else: + labels = np.asarray(labels) + n_labels = labels.size + if n_labels == 0: + raise ValueError("'labels' should contains at least one label.") + elif y_true.size == 0: + return np.zeros((n_labels, n_labels), dtype=int) + elif len(np.intersect1d(y_true, labels)) == 0: + raise ValueError("At least one label specified must be in y_true") + + if sample_weight is None: + sample_weight = np.ones(y_true.shape[0], dtype=np.int64) + else: + sample_weight = np.asarray(sample_weight) + + check_consistent_length(y_true, y_pred, sample_weight) + + n_labels = labels.size + # If labels are not consecutive integers starting from zero, then + # y_true and y_pred must be converted into index form + need_index_conversion = not ( + labels.dtype.kind in {"i", "u", "b"} + and np.all(labels == np.arange(n_labels)) + and y_true.min() >= 0 + and y_pred.min() >= 0 + ) + if need_index_conversion: + label_to_ind = {y: x for x, y in enumerate(labels)} + y_pred = np.array([label_to_ind.get(x, n_labels + 1) for x in y_pred]) + y_true = np.array([label_to_ind.get(x, n_labels + 1) for x in y_true]) + + # intersect y_pred, y_true with labels, eliminate items not in labels + ind = np.logical_and(y_pred < n_labels, y_true < n_labels) + if not np.all(ind): + y_pred = y_pred[ind] + y_true = y_true[ind] + # also eliminate weights of eliminated items + sample_weight = sample_weight[ind] + + # Choose the accumulator dtype to always have high precision + if sample_weight.dtype.kind in {"i", "u", "b"}: + dtype = np.int64 + else: + dtype = np.float64 + + cm = coo_matrix( + (sample_weight, (y_true, y_pred)), + shape=(n_labels, n_labels), + dtype=dtype, + ).toarray() + + with np.errstate(all="ignore"): + if normalize == "true": + cm = cm / cm.sum(axis=1, keepdims=True) + elif normalize == "pred": + cm = cm / cm.sum(axis=0, keepdims=True) + elif normalize == "all": + cm = cm / cm.sum() + cm = np.nan_to_num(cm) + + if cm.shape == (1, 1): + warnings.warn( + ( + "A single label was found in 'y_true' and 'y_pred'. For the confusion " + "matrix to have the correct shape, use the 'labels' parameter to pass " + "all known labels." + ), + UserWarning, + ) + + return cm + + +@validate_params( + { + "y_true": ["array-like", "sparse matrix"], + "y_pred": ["array-like", "sparse matrix"], + "sample_weight": ["array-like", None], + "labels": ["array-like", None], + "samplewise": ["boolean"], + }, + prefer_skip_nested_validation=True, +) +def multilabel_confusion_matrix( + y_true, y_pred, *, sample_weight=None, labels=None, samplewise=False +): + """Compute a confusion matrix for each class or sample. + + .. versionadded:: 0.21 + + Compute class-wise (default) or sample-wise (samplewise=True) multilabel + confusion matrix to evaluate the accuracy of a classification, and output + confusion matrices for each class or sample. + + In multilabel confusion matrix :math:`MCM`, the count of true negatives + is :math:`MCM_{:,0,0}`, false negatives is :math:`MCM_{:,1,0}`, + true positives is :math:`MCM_{:,1,1}` and false positives is + :math:`MCM_{:,0,1}`. + + Multiclass data will be treated as if binarized under a one-vs-rest + transformation. 
Returned confusion matrices will be in the order of + sorted unique labels in the union of (y_true, y_pred). + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + y_true : {array-like, sparse matrix} of shape (n_samples, n_outputs) or \ + (n_samples,) + Ground truth (correct) target values. + + y_pred : {array-like, sparse matrix} of shape (n_samples, n_outputs) or \ + (n_samples,) + Estimated targets as returned by a classifier. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. + + labels : array-like of shape (n_classes,), default=None + A list of classes or column indices to select some (or to force + inclusion of classes absent from the data). + + samplewise : bool, default=False + In the multilabel case, this calculates a confusion matrix per sample. + + Returns + ------- + multi_confusion : ndarray of shape (n_outputs, 2, 2) + A 2x2 confusion matrix corresponding to each output in the input. + When calculating class-wise multi_confusion (default), then + n_outputs = n_labels; when calculating sample-wise multi_confusion + (samplewise=True), n_outputs = n_samples. If ``labels`` is defined, + the results will be returned in the order specified in ``labels``, + otherwise the results will be returned in sorted order by default. + + See Also + -------- + confusion_matrix : Compute confusion matrix to evaluate the accuracy of a + classifier. + + Notes + ----- + The `multilabel_confusion_matrix` calculates class-wise or sample-wise + multilabel confusion matrices, and in multiclass tasks, labels are + binarized under a one-vs-rest way; while + :func:`~sklearn.metrics.confusion_matrix` calculates one confusion matrix + for confusion between every two classes. + + Examples + -------- + Multilabel-indicator case: + + >>> import numpy as np + >>> from sklearn.metrics import multilabel_confusion_matrix + >>> y_true = np.array([[1, 0, 1], + ... [0, 1, 0]]) + >>> y_pred = np.array([[1, 0, 0], + ... [0, 1, 1]]) + >>> multilabel_confusion_matrix(y_true, y_pred) + array([[[1, 0], + [0, 1]], + + [[1, 0], + [0, 1]], + + [[0, 1], + [1, 0]]]) + + Multiclass case: + + >>> y_true = ["cat", "ant", "cat", "cat", "ant", "bird"] + >>> y_pred = ["ant", "ant", "cat", "cat", "ant", "cat"] + >>> multilabel_confusion_matrix(y_true, y_pred, + ... labels=["ant", "bird", "cat"]) + array([[[3, 1], + [0, 2]], + + [[5, 0], + [1, 0]], + + [[2, 1], + [1, 2]]]) + """ + y_type, y_true, y_pred = _check_targets(y_true, y_pred) + if sample_weight is not None: + sample_weight = column_or_1d(sample_weight) + check_consistent_length(y_true, y_pred, sample_weight) + + if y_type not in ("binary", "multiclass", "multilabel-indicator"): + raise ValueError("%s is not supported" % y_type) + + present_labels = unique_labels(y_true, y_pred) + if labels is None: + labels = present_labels + n_labels = None + else: + n_labels = len(labels) + labels = np.hstack( + [labels, np.setdiff1d(present_labels, labels, assume_unique=True)] + ) + + if y_true.ndim == 1: + if samplewise: + raise ValueError( + "Samplewise metrics are not available outside of " + "multilabel classification." 
+ ) + + le = LabelEncoder() + le.fit(labels) + y_true = le.transform(y_true) + y_pred = le.transform(y_pred) + sorted_labels = le.classes_ + + # labels are now from 0 to len(labels) - 1 -> use bincount + tp = y_true == y_pred + tp_bins = y_true[tp] + if sample_weight is not None: + tp_bins_weights = np.asarray(sample_weight)[tp] + else: + tp_bins_weights = None + + if len(tp_bins): + tp_sum = np.bincount( + tp_bins, weights=tp_bins_weights, minlength=len(labels) + ) + else: + # Pathological case + true_sum = pred_sum = tp_sum = np.zeros(len(labels)) + if len(y_pred): + pred_sum = np.bincount(y_pred, weights=sample_weight, minlength=len(labels)) + if len(y_true): + true_sum = np.bincount(y_true, weights=sample_weight, minlength=len(labels)) + + # Retain only selected labels + indices = np.searchsorted(sorted_labels, labels[:n_labels]) + tp_sum = tp_sum[indices] + true_sum = true_sum[indices] + pred_sum = pred_sum[indices] + + else: + sum_axis = 1 if samplewise else 0 + + # All labels are index integers for multilabel. + # Select labels: + if not np.array_equal(labels, present_labels): + if np.max(labels) > np.max(present_labels): + raise ValueError( + "All labels must be in [0, n labels) for " + "multilabel targets. " + "Got %d > %d" % (np.max(labels), np.max(present_labels)) + ) + if np.min(labels) < 0: + raise ValueError( + "All labels must be in [0, n labels) for " + "multilabel targets. " + "Got %d < 0" + % np.min(labels) + ) + + if n_labels is not None: + y_true = y_true[:, labels[:n_labels]] + y_pred = y_pred[:, labels[:n_labels]] + + # calculate weighted counts + true_and_pred = y_true.multiply(y_pred) + tp_sum = count_nonzero( + true_and_pred, axis=sum_axis, sample_weight=sample_weight + ) + pred_sum = count_nonzero(y_pred, axis=sum_axis, sample_weight=sample_weight) + true_sum = count_nonzero(y_true, axis=sum_axis, sample_weight=sample_weight) + + fp = pred_sum - tp_sum + fn = true_sum - tp_sum + tp = tp_sum + + if sample_weight is not None and samplewise: + sample_weight = np.array(sample_weight) + tp = np.array(tp) + fp = np.array(fp) + fn = np.array(fn) + tn = sample_weight * y_true.shape[1] - tp - fp - fn + elif sample_weight is not None: + tn = sum(sample_weight) - tp - fp - fn + elif samplewise: + tn = y_true.shape[1] - tp - fp - fn + else: + tn = y_true.shape[0] - tp - fp - fn + + return np.array([tn, fp, fn, tp]).T.reshape(-1, 2, 2) + + +@validate_params( + { + "y1": ["array-like"], + "y2": ["array-like"], + "labels": ["array-like", None], + "weights": [StrOptions({"linear", "quadratic"}), None], + "sample_weight": ["array-like", None], + }, + prefer_skip_nested_validation=True, +) +def cohen_kappa_score(y1, y2, *, labels=None, weights=None, sample_weight=None): + r"""Compute Cohen's kappa: a statistic that measures inter-annotator agreement. + + This function computes Cohen's kappa [1]_, a score that expresses the level + of agreement between two annotators on a classification problem. It is + defined as + + .. math:: + \kappa = (p_o - p_e) / (1 - p_e) + + where :math:`p_o` is the empirical probability of agreement on the label + assigned to any sample (the observed agreement ratio), and :math:`p_e` is + the expected agreement when both annotators assign labels randomly. + :math:`p_e` is estimated using a per-annotator empirical prior over the + class labels [2]_. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + y1 : array-like of shape (n_samples,) + Labels assigned by the first annotator. 
+ + y2 : array-like of shape (n_samples,) + Labels assigned by the second annotator. The kappa statistic is + symmetric, so swapping ``y1`` and ``y2`` doesn't change the value. + + labels : array-like of shape (n_classes,), default=None + List of labels to index the matrix. This may be used to select a + subset of labels. If `None`, all labels that appear at least once in + ``y1`` or ``y2`` are used. + + weights : {'linear', 'quadratic'}, default=None + Weighting type to calculate the score. `None` means no weighted; + "linear" means linear weighted; "quadratic" means quadratic weighted. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. + + Returns + ------- + kappa : float + The kappa statistic, which is a number between -1 and 1. The maximum + value means complete agreement; zero or lower means chance agreement. + + References + ---------- + .. [1] :doi:`J. Cohen (1960). "A coefficient of agreement for nominal scales". + Educational and Psychological Measurement 20(1):37-46. + <10.1177/001316446002000104>` + .. [2] `R. Artstein and M. Poesio (2008). "Inter-coder agreement for + computational linguistics". Computational Linguistics 34(4):555-596 + `_. + .. [3] `Wikipedia entry for the Cohen's kappa + `_. + + Examples + -------- + >>> from sklearn.metrics import cohen_kappa_score + >>> y1 = ["negative", "positive", "negative", "neutral", "positive"] + >>> y2 = ["negative", "positive", "negative", "neutral", "negative"] + >>> cohen_kappa_score(y1, y2) + 0.6875 + """ + confusion = confusion_matrix(y1, y2, labels=labels, sample_weight=sample_weight) + n_classes = confusion.shape[0] + sum0 = np.sum(confusion, axis=0) + sum1 = np.sum(confusion, axis=1) + expected = np.outer(sum0, sum1) / np.sum(sum0) + + if weights is None: + w_mat = np.ones([n_classes, n_classes], dtype=int) + w_mat.flat[:: n_classes + 1] = 0 + else: # "linear" or "quadratic" + w_mat = np.zeros([n_classes, n_classes], dtype=int) + w_mat += np.arange(n_classes) + if weights == "linear": + w_mat = np.abs(w_mat - w_mat.T) + else: + w_mat = (w_mat - w_mat.T) ** 2 + + k = np.sum(w_mat * confusion) / np.sum(w_mat * expected) + return 1 - k + + +@validate_params( + { + "y_true": ["array-like", "sparse matrix"], + "y_pred": ["array-like", "sparse matrix"], + "labels": ["array-like", None], + "pos_label": [Real, str, "boolean", None], + "average": [ + StrOptions({"micro", "macro", "samples", "weighted", "binary"}), + None, + ], + "sample_weight": ["array-like", None], + "zero_division": [ + Options(Real, {0, 1}), + StrOptions({"warn"}), + ], + }, + prefer_skip_nested_validation=True, +) +def jaccard_score( + y_true, + y_pred, + *, + labels=None, + pos_label=1, + average="binary", + sample_weight=None, + zero_division="warn", +): + """Jaccard similarity coefficient score. + + The Jaccard index [1], or Jaccard similarity coefficient, defined as + the size of the intersection divided by the size of the union of two label + sets, is used to compare set of predicted labels for a sample to the + corresponding set of labels in ``y_true``. + + Support beyond term:`binary` targets is achieved by treating :term:`multiclass` + and :term:`multilabel` data as a collection of binary problems, one for each + label. For the :term:`binary` case, setting `average='binary'` will return the + Jaccard similarity coefficient for `pos_label`. If `average` is not `'binary'`, + `pos_label` is ignored and scores for both classes are computed, then averaged or + both returned (when `average=None`). 
Similarly, for :term:`multiclass` and + :term:`multilabel` targets, scores for all `labels` are either returned or + averaged depending on the `average` parameter. Use `labels` specify the set of + labels to calculate the score for. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + y_true : 1d array-like, or label indicator array / sparse matrix + Ground truth (correct) labels. + + y_pred : 1d array-like, or label indicator array / sparse matrix + Predicted labels, as returned by a classifier. + + labels : array-like of shape (n_classes,), default=None + The set of labels to include when `average != 'binary'`, and their + order if `average is None`. Labels present in the data can be + excluded, for example in multiclass classification to exclude a "negative + class". Labels not present in the data can be included and will be + "assigned" 0 samples. For multilabel targets, labels are column indices. + By default, all labels in `y_true` and `y_pred` are used in sorted order. + + pos_label : int, float, bool or str, default=1 + The class to report if `average='binary'` and the data is binary, + otherwise this parameter is ignored. + For multiclass or multilabel targets, set `labels=[pos_label]` and + `average != 'binary'` to report metrics for one label only. + + average : {'micro', 'macro', 'samples', 'weighted', \ + 'binary'} or None, default='binary' + If ``None``, the scores for each class are returned. Otherwise, this + determines the type of averaging performed on the data: + + ``'binary'``: + Only report results for the class specified by ``pos_label``. + This is applicable only if targets (``y_{true,pred}``) are binary. + ``'micro'``: + Calculate metrics globally by counting the total true positives, + false negatives and false positives. + ``'macro'``: + Calculate metrics for each label, and find their unweighted + mean. This does not take label imbalance into account. + ``'weighted'``: + Calculate metrics for each label, and find their average, weighted + by support (the number of true instances for each label). This + alters 'macro' to account for label imbalance. + ``'samples'``: + Calculate metrics for each instance, and find their average (only + meaningful for multilabel classification). + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. + + zero_division : "warn", {0.0, 1.0}, default="warn" + Sets the value to return when there is a zero division, i.e. when there + there are no negative values in predictions and labels. If set to + "warn", this acts like 0, but a warning is also raised. + + Returns + ------- + score : float or ndarray of shape (n_unique_labels,), dtype=np.float64 + The Jaccard score. When `average` is not `None`, a single scalar is + returned. + + See Also + -------- + accuracy_score : Function for calculating the accuracy score. + f1_score : Function for calculating the F1 score. + multilabel_confusion_matrix : Function for computing a confusion matrix\ + for each class or sample. + + Notes + ----- + :func:`jaccard_score` may be a poor metric if there are no + positives for some samples or classes. Jaccard is undefined if there are + no true or predicted labels, and our implementation will return a score + of 0 with a warning. + + References + ---------- + .. [1] `Wikipedia entry for the Jaccard index + `_. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.metrics import jaccard_score + >>> y_true = np.array([[0, 1, 1], + ... [1, 1, 0]]) + >>> y_pred = np.array([[1, 1, 1], + ... 
[1, 0, 0]]) + + In the binary case: + + >>> jaccard_score(y_true[0], y_pred[0]) + 0.6666... + + In the 2D comparison case (e.g. image similarity): + + >>> jaccard_score(y_true, y_pred, average="micro") + 0.6 + + In the multilabel case: + + >>> jaccard_score(y_true, y_pred, average='samples') + 0.5833... + >>> jaccard_score(y_true, y_pred, average='macro') + 0.6666... + >>> jaccard_score(y_true, y_pred, average=None) + array([0.5, 0.5, 1. ]) + + In the multiclass case: + + >>> y_pred = [0, 2, 1, 2] + >>> y_true = [0, 1, 2, 2] + >>> jaccard_score(y_true, y_pred, average=None) + array([1. , 0. , 0.33...]) + """ + labels = _check_set_wise_labels(y_true, y_pred, average, labels, pos_label) + samplewise = average == "samples" + MCM = multilabel_confusion_matrix( + y_true, + y_pred, + sample_weight=sample_weight, + labels=labels, + samplewise=samplewise, + ) + numerator = MCM[:, 1, 1] + denominator = MCM[:, 1, 1] + MCM[:, 0, 1] + MCM[:, 1, 0] + + if average == "micro": + numerator = np.array([numerator.sum()]) + denominator = np.array([denominator.sum()]) + + jaccard = _prf_divide( + numerator, + denominator, + "jaccard", + "true or predicted", + average, + ("jaccard",), + zero_division=zero_division, + ) + if average is None: + return jaccard + if average == "weighted": + weights = MCM[:, 1, 0] + MCM[:, 1, 1] + if not np.any(weights): + # numerator is 0, and warning should have already been issued + weights = None + elif average == "samples" and sample_weight is not None: + weights = sample_weight + else: + weights = None + return np.average(jaccard, weights=weights) + + +@validate_params( + { + "y_true": ["array-like"], + "y_pred": ["array-like"], + "sample_weight": ["array-like", None], + }, + prefer_skip_nested_validation=True, +) +def matthews_corrcoef(y_true, y_pred, *, sample_weight=None): + """Compute the Matthews correlation coefficient (MCC). + + The Matthews correlation coefficient is used in machine learning as a + measure of the quality of binary and multiclass classifications. It takes + into account true and false positives and negatives and is generally + regarded as a balanced measure which can be used even if the classes are of + very different sizes. The MCC is in essence a correlation coefficient value + between -1 and +1. A coefficient of +1 represents a perfect prediction, 0 + an average random prediction and -1 an inverse prediction. The statistic + is also known as the phi coefficient. [source: Wikipedia] + + Binary and multiclass labels are supported. Only in the binary case does + this relate to information about true and false positives and negatives. + See references below. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + y_true : array-like of shape (n_samples,) + Ground truth (correct) target values. + + y_pred : array-like of shape (n_samples,) + Estimated targets as returned by a classifier. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. + + .. versionadded:: 0.18 + + Returns + ------- + mcc : float + The Matthews correlation coefficient (+1 represents a perfect + prediction, 0 an average random prediction and -1 and inverse + prediction). + + References + ---------- + .. [1] :doi:`Baldi, Brunak, Chauvin, Andersen and Nielsen, (2000). Assessing the + accuracy of prediction algorithms for classification: an overview. + <10.1093/bioinformatics/16.5.412>` + + .. [2] `Wikipedia entry for the Matthews Correlation Coefficient + `_. + + .. [3] `Gorodkin, (2004). 
Comparing two K-category assignments by a + K-category correlation coefficient + `_. + + .. [4] `Jurman, Riccadonna, Furlanello, (2012). A Comparison of MCC and CEN + Error Measures in MultiClass Prediction + `_. + + Examples + -------- + >>> from sklearn.metrics import matthews_corrcoef + >>> y_true = [+1, +1, +1, -1] + >>> y_pred = [+1, -1, +1, +1] + >>> matthews_corrcoef(y_true, y_pred) + -0.33... + """ + y_type, y_true, y_pred = _check_targets(y_true, y_pred) + check_consistent_length(y_true, y_pred, sample_weight) + if y_type not in {"binary", "multiclass"}: + raise ValueError("%s is not supported" % y_type) + + lb = LabelEncoder() + lb.fit(np.hstack([y_true, y_pred])) + y_true = lb.transform(y_true) + y_pred = lb.transform(y_pred) + + C = confusion_matrix(y_true, y_pred, sample_weight=sample_weight) + t_sum = C.sum(axis=1, dtype=np.float64) + p_sum = C.sum(axis=0, dtype=np.float64) + n_correct = np.trace(C, dtype=np.float64) + n_samples = p_sum.sum() + cov_ytyp = n_correct * n_samples - np.dot(t_sum, p_sum) + cov_ypyp = n_samples**2 - np.dot(p_sum, p_sum) + cov_ytyt = n_samples**2 - np.dot(t_sum, t_sum) + + if cov_ypyp * cov_ytyt == 0: + return 0.0 + else: + return cov_ytyp / np.sqrt(cov_ytyt * cov_ypyp) + + +@validate_params( + { + "y_true": ["array-like", "sparse matrix"], + "y_pred": ["array-like", "sparse matrix"], + "normalize": ["boolean"], + "sample_weight": ["array-like", None], + }, + prefer_skip_nested_validation=True, +) +def zero_one_loss(y_true, y_pred, *, normalize=True, sample_weight=None): + """Zero-one classification loss. + + If normalize is ``True``, return the fraction of misclassifications + (float), else it returns the number of misclassifications (int). The best + performance is 0. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + y_true : 1d array-like, or label indicator array / sparse matrix + Ground truth (correct) labels. + + y_pred : 1d array-like, or label indicator array / sparse matrix + Predicted labels, as returned by a classifier. + + normalize : bool, default=True + If ``False``, return the number of misclassifications. + Otherwise, return the fraction of misclassifications. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. + + Returns + ------- + loss : float or int, + If ``normalize == True``, return the fraction of misclassifications + (float), else it returns the number of misclassifications (int). + + See Also + -------- + accuracy_score : Compute the accuracy score. By default, the function will + return the fraction of correct predictions divided by the total number + of predictions. + hamming_loss : Compute the average Hamming loss or Hamming distance between + two sets of samples. + jaccard_score : Compute the Jaccard similarity coefficient score. + + Notes + ----- + In multilabel classification, the zero_one_loss function corresponds to + the subset zero-one loss: for each sample, the entire set of labels must be + correctly predicted, otherwise the loss for that sample is equal to one. 
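+
+ When ``normalize == True``, the returned value is exactly
+ ``1 - accuracy_score(y_true, y_pred, sample_weight=sample_weight)``, as can
+ be seen from the implementation below.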
+ + Examples + -------- + >>> from sklearn.metrics import zero_one_loss + >>> y_pred = [1, 2, 3, 4] + >>> y_true = [2, 2, 3, 4] + >>> zero_one_loss(y_true, y_pred) + 0.25 + >>> zero_one_loss(y_true, y_pred, normalize=False) + 1.0 + + In the multilabel case with binary label indicators: + + >>> import numpy as np + >>> zero_one_loss(np.array([[0, 1], [1, 1]]), np.ones((2, 2))) + 0.5 + """ + xp, _ = get_namespace(y_true, y_pred) + score = accuracy_score( + y_true, y_pred, normalize=normalize, sample_weight=sample_weight + ) + + if normalize: + return 1 - score + else: + if sample_weight is not None: + n_samples = xp.sum(sample_weight) + else: + n_samples = _num_samples(y_true) + return n_samples - score + + +@validate_params( + { + "y_true": ["array-like", "sparse matrix"], + "y_pred": ["array-like", "sparse matrix"], + "labels": ["array-like", None], + "pos_label": [Real, str, "boolean", None], + "average": [ + StrOptions({"micro", "macro", "samples", "weighted", "binary"}), + None, + ], + "sample_weight": ["array-like", None], + "zero_division": [ + Options(Real, {0.0, 1.0}), + "nan", + StrOptions({"warn"}), + ], + }, + prefer_skip_nested_validation=True, +) +def f1_score( + y_true, + y_pred, + *, + labels=None, + pos_label=1, + average="binary", + sample_weight=None, + zero_division="warn", +): + """Compute the F1 score, also known as balanced F-score or F-measure. + + The F1 score can be interpreted as a harmonic mean of the precision and + recall, where an F1 score reaches its best value at 1 and worst score at 0. + The relative contribution of precision and recall to the F1 score are + equal. The formula for the F1 score is: + + .. math:: + \\text{F1} = \\frac{2 * \\text{TP}}{2 * \\text{TP} + \\text{FP} + \\text{FN}} + + Where :math:`\\text{TP}` is the number of true positives, :math:`\\text{FN}` is the + number of false negatives, and :math:`\\text{FP}` is the number of false positives. + F1 is by default + calculated as 0.0 when there are no true positives, false negatives, or + false positives. + + Support beyond :term:`binary` targets is achieved by treating :term:`multiclass` + and :term:`multilabel` data as a collection of binary problems, one for each + label. For the :term:`binary` case, setting `average='binary'` will return + F1 score for `pos_label`. If `average` is not `'binary'`, `pos_label` is ignored + and F1 score for both classes are computed, then averaged or both returned (when + `average=None`). Similarly, for :term:`multiclass` and :term:`multilabel` targets, + F1 score for all `labels` are either returned or averaged depending on the + `average` parameter. Use `labels` specify the set of labels to calculate F1 score + for. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + y_true : 1d array-like, or label indicator array / sparse matrix + Ground truth (correct) target values. + + y_pred : 1d array-like, or label indicator array / sparse matrix + Estimated targets as returned by a classifier. + + labels : array-like, default=None + The set of labels to include when `average != 'binary'`, and their + order if `average is None`. Labels present in the data can be + excluded, for example in multiclass classification to exclude a "negative + class". Labels not present in the data can be included and will be + "assigned" 0 samples. For multilabel targets, labels are column indices. + By default, all labels in `y_true` and `y_pred` are used in sorted order. + + .. versionchanged:: 0.17 + Parameter `labels` improved for multiclass problem. 
+ + pos_label : int, float, bool or str, default=1 + The class to report if `average='binary'` and the data is binary, + otherwise this parameter is ignored. + For multiclass or multilabel targets, set `labels=[pos_label]` and + `average != 'binary'` to report metrics for one label only. + + average : {'micro', 'macro', 'samples', 'weighted', 'binary'} or None, \ + default='binary' + This parameter is required for multiclass/multilabel targets. + If ``None``, the scores for each class are returned. Otherwise, this + determines the type of averaging performed on the data: + + ``'binary'``: + Only report results for the class specified by ``pos_label``. + This is applicable only if targets (``y_{true,pred}``) are binary. + ``'micro'``: + Calculate metrics globally by counting the total true positives, + false negatives and false positives. + ``'macro'``: + Calculate metrics for each label, and find their unweighted + mean. This does not take label imbalance into account. + ``'weighted'``: + Calculate metrics for each label, and find their average weighted + by support (the number of true instances for each label). This + alters 'macro' to account for label imbalance; it can result in an + F-score that is not between precision and recall. + ``'samples'``: + Calculate metrics for each instance, and find their average (only + meaningful for multilabel classification where this differs from + :func:`accuracy_score`). + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. + + zero_division : {"warn", 0.0, 1.0, np.nan}, default="warn" + Sets the value to return when there is a zero division, i.e. when all + predictions and labels are negative. + + Notes: + - If set to "warn", this acts like 0, but a warning is also raised. + - If set to `np.nan`, such values will be excluded from the average. + + .. versionadded:: 1.3 + `np.nan` option was added. + + Returns + ------- + f1_score : float or array of float, shape = [n_unique_labels] + F1 score of the positive class in binary classification or weighted + average of the F1 scores of each class for the multiclass task. + + See Also + -------- + fbeta_score : Compute the F-beta score. + precision_recall_fscore_support : Compute the precision, recall, F-score, + and support. + jaccard_score : Compute the Jaccard similarity coefficient score. + multilabel_confusion_matrix : Compute a confusion matrix for each class or + sample. + + Notes + ----- + When ``true positive + false positive + false negative == 0`` (i.e. a class + is completely absent from both ``y_true`` or ``y_pred``), f-score is + undefined. In such cases, by default f-score will be set to 0.0, and + ``UndefinedMetricWarning`` will be raised. This behavior can be modified by + setting the ``zero_division`` parameter. + + References + ---------- + .. [1] `Wikipedia entry for the F1-score + `_. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.metrics import f1_score + >>> y_true = [0, 1, 2, 0, 1, 2] + >>> y_pred = [0, 2, 1, 0, 0, 1] + >>> f1_score(y_true, y_pred, average='macro') + 0.26... + >>> f1_score(y_true, y_pred, average='micro') + 0.33... + >>> f1_score(y_true, y_pred, average='weighted') + 0.26... + >>> f1_score(y_true, y_pred, average=None) + array([0.8, 0. , 0. ]) + + >>> # binary classification + >>> y_true_empty = [0, 0, 0, 0, 0, 0] + >>> y_pred_empty = [0, 0, 0, 0, 0, 0] + >>> f1_score(y_true_empty, y_pred_empty) + 0.0... + >>> f1_score(y_true_empty, y_pred_empty, zero_division=1.0) + 1.0... 
+ >>> f1_score(y_true_empty, y_pred_empty, zero_division=np.nan) + nan... + + >>> # multilabel classification + >>> y_true = [[0, 0, 0], [1, 1, 1], [0, 1, 1]] + >>> y_pred = [[0, 0, 0], [1, 1, 1], [1, 1, 0]] + >>> f1_score(y_true, y_pred, average=None) + array([0.66666667, 1. , 0.66666667]) + """ + return fbeta_score( + y_true, + y_pred, + beta=1, + labels=labels, + pos_label=pos_label, + average=average, + sample_weight=sample_weight, + zero_division=zero_division, + ) + + +@validate_params( + { + "y_true": ["array-like", "sparse matrix"], + "y_pred": ["array-like", "sparse matrix"], + "beta": [Interval(Real, 0.0, None, closed="both")], + "labels": ["array-like", None], + "pos_label": [Real, str, "boolean", None], + "average": [ + StrOptions({"micro", "macro", "samples", "weighted", "binary"}), + None, + ], + "sample_weight": ["array-like", None], + "zero_division": [ + Options(Real, {0.0, 1.0}), + "nan", + StrOptions({"warn"}), + ], + }, + prefer_skip_nested_validation=True, +) +def fbeta_score( + y_true, + y_pred, + *, + beta, + labels=None, + pos_label=1, + average="binary", + sample_weight=None, + zero_division="warn", +): + """Compute the F-beta score. + + The F-beta score is the weighted harmonic mean of precision and recall, + reaching its optimal value at 1 and its worst value at 0. + + The `beta` parameter represents the ratio of recall importance to + precision importance. `beta > 1` gives more weight to recall, while + `beta < 1` favors precision. For example, `beta = 2` makes recall twice + as important as precision, while `beta = 0.5` does the opposite. + Asymptotically, `beta -> +inf` considers only recall, and `beta -> 0` + only precision. + + The formula for F-beta score is: + + .. math:: + + F_\\beta = \\frac{(1 + \\beta^2) \\text{tp}} + {(1 + \\beta^2) \\text{tp} + \\text{fp} + \\beta^2 \\text{fn}} + + Where :math:`\\text{tp}` is the number of true positives, :math:`\\text{fp}` is the + number of false positives, and :math:`\\text{fn}` is the number of false negatives. + + Support beyond term:`binary` targets is achieved by treating :term:`multiclass` + and :term:`multilabel` data as a collection of binary problems, one for each + label. For the :term:`binary` case, setting `average='binary'` will return + F-beta score for `pos_label`. If `average` is not `'binary'`, `pos_label` is + ignored and F-beta score for both classes are computed, then averaged or both + returned (when `average=None`). Similarly, for :term:`multiclass` and + :term:`multilabel` targets, F-beta score for all `labels` are either returned or + averaged depending on the `average` parameter. Use `labels` specify the set of + labels to calculate F-beta score for. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + y_true : 1d array-like, or label indicator array / sparse matrix + Ground truth (correct) target values. + + y_pred : 1d array-like, or label indicator array / sparse matrix + Estimated targets as returned by a classifier. + + beta : float + Determines the weight of recall in the combined score. + + labels : array-like, default=None + The set of labels to include when `average != 'binary'`, and their + order if `average is None`. Labels present in the data can be + excluded, for example in multiclass classification to exclude a "negative + class". Labels not present in the data can be included and will be + "assigned" 0 samples. For multilabel targets, labels are column indices. + By default, all labels in `y_true` and `y_pred` are used in sorted order. + + .. 
versionchanged:: 0.17 + Parameter `labels` improved for multiclass problem. + + pos_label : int, float, bool or str, default=1 + The class to report if `average='binary'` and the data is binary, + otherwise this parameter is ignored. + For multiclass or multilabel targets, set `labels=[pos_label]` and + `average != 'binary'` to report metrics for one label only. + + average : {'micro', 'macro', 'samples', 'weighted', 'binary'} or None, \ + default='binary' + This parameter is required for multiclass/multilabel targets. + If ``None``, the scores for each class are returned. Otherwise, this + determines the type of averaging performed on the data: + + ``'binary'``: + Only report results for the class specified by ``pos_label``. + This is applicable only if targets (``y_{true,pred}``) are binary. + ``'micro'``: + Calculate metrics globally by counting the total true positives, + false negatives and false positives. + ``'macro'``: + Calculate metrics for each label, and find their unweighted + mean. This does not take label imbalance into account. + ``'weighted'``: + Calculate metrics for each label, and find their average weighted + by support (the number of true instances for each label). This + alters 'macro' to account for label imbalance; it can result in an + F-score that is not between precision and recall. + ``'samples'``: + Calculate metrics for each instance, and find their average (only + meaningful for multilabel classification where this differs from + :func:`accuracy_score`). + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. + + zero_division : {"warn", 0.0, 1.0, np.nan}, default="warn" + Sets the value to return when there is a zero division, i.e. when all + predictions and labels are negative. + + Notes: + - If set to "warn", this acts like 0, but a warning is also raised. + - If set to `np.nan`, such values will be excluded from the average. + + .. versionadded:: 1.3 + `np.nan` option was added. + + Returns + ------- + fbeta_score : float (if average is not None) or array of float, shape =\ + [n_unique_labels] + F-beta score of the positive class in binary classification or weighted + average of the F-beta score of each class for the multiclass task. + + See Also + -------- + precision_recall_fscore_support : Compute the precision, recall, F-score, + and support. + multilabel_confusion_matrix : Compute a confusion matrix for each class or + sample. + + Notes + ----- + When ``true positive + false positive + false negative == 0``, f-score + returns 0.0 and raises ``UndefinedMetricWarning``. This behavior can be + modified by setting ``zero_division``. + + References + ---------- + .. [1] R. Baeza-Yates and B. Ribeiro-Neto (2011). + Modern Information Retrieval. Addison Wesley, pp. 327-328. + + .. [2] `Wikipedia entry for the F1-score + `_. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.metrics import fbeta_score + >>> y_true = [0, 1, 2, 0, 1, 2] + >>> y_pred = [0, 2, 1, 0, 0, 1] + >>> fbeta_score(y_true, y_pred, average='macro', beta=0.5) + 0.23... + >>> fbeta_score(y_true, y_pred, average='micro', beta=0.5) + 0.33... + >>> fbeta_score(y_true, y_pred, average='weighted', beta=0.5) + 0.23... + >>> fbeta_score(y_true, y_pred, average=None, beta=0.5) + array([0.71..., 0. , 0. ]) + >>> y_pred_empty = [0, 0, 0, 0, 0, 0] + >>> fbeta_score(y_true, y_pred_empty, + ... average="macro", zero_division=np.nan, beta=0.5) + 0.12... 
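+
+ In the binary case, ``beta=2`` weights recall more heavily than precision,
+ so a missed positive lowers the score more than a false alarm:
+
+ >>> y_true = [0, 1, 1, 0]
+ >>> y_pred = [0, 1, 0, 0]
+ >>> fbeta_score(y_true, y_pred, beta=2)
+ 0.55...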
+ """ + + _, _, f, _ = precision_recall_fscore_support( + y_true, + y_pred, + beta=beta, + labels=labels, + pos_label=pos_label, + average=average, + warn_for=("f-score",), + sample_weight=sample_weight, + zero_division=zero_division, + ) + return f + + +def _prf_divide( + numerator, denominator, metric, modifier, average, warn_for, zero_division="warn" +): + """Performs division and handles divide-by-zero. + + On zero-division, sets the corresponding result elements equal to + 0, 1 or np.nan (according to ``zero_division``). Plus, if + ``zero_division != "warn"`` raises a warning. + + The metric, modifier and average arguments are used only for determining + an appropriate warning. + """ + mask = denominator == 0.0 + denominator = denominator.copy() + denominator[mask] = 1 # avoid infs/nans + result = numerator / denominator + + if not np.any(mask): + return result + + # set those with 0 denominator to `zero_division`, and 0 when "warn" + zero_division_value = _check_zero_division(zero_division) + result[mask] = zero_division_value + + # we assume the user will be removing warnings if zero_division is set + # to something different than "warn". If we are computing only f-score + # the warning will be raised only if precision and recall are ill-defined + if zero_division != "warn" or metric not in warn_for: + return result + + # build appropriate warning + if metric in warn_for: + _warn_prf(average, modifier, f"{metric.capitalize()} is", len(result)) + + return result + + +def _warn_prf(average, modifier, msg_start, result_size): + axis0, axis1 = "sample", "label" + if average == "samples": + axis0, axis1 = axis1, axis0 + msg = ( + "{0} ill-defined and being set to 0.0 {{0}} " + "no {1} {2}s. Use `zero_division` parameter to control" + " this behavior.".format(msg_start, modifier, axis0) + ) + if result_size == 1: + msg = msg.format("due to") + else: + msg = msg.format("in {0}s with".format(axis1)) + warnings.warn(msg, UndefinedMetricWarning, stacklevel=2) + + +def _check_set_wise_labels(y_true, y_pred, average, labels, pos_label): + """Validation associated with set-wise metrics. + + Returns identified labels. + """ + average_options = (None, "micro", "macro", "weighted", "samples") + if average not in average_options and average != "binary": + raise ValueError("average has to be one of " + str(average_options)) + + y_type, y_true, y_pred = _check_targets(y_true, y_pred) + # Convert to Python primitive type to avoid NumPy type / Python str + # comparison. See https://github.com/numpy/numpy/issues/6784 + present_labels = unique_labels(y_true, y_pred).tolist() + if average == "binary": + if y_type == "binary": + if pos_label not in present_labels: + if len(present_labels) >= 2: + raise ValueError( + f"pos_label={pos_label} is not a valid label. It " + f"should be one of {present_labels}" + ) + labels = [pos_label] + else: + average_options = list(average_options) + if y_type == "multiclass": + average_options.remove("samples") + raise ValueError( + "Target is %s but average='binary'. Please " + "choose another average setting, one of %r." % (y_type, average_options) + ) + elif pos_label not in (None, 1): + warnings.warn( + "Note that pos_label (set to %r) is ignored when " + "average != 'binary' (got %r). You may use " + "labels=[pos_label] to specify a single positive class." 
+ % (pos_label, average), + UserWarning, + ) + return labels + + +@validate_params( + { + "y_true": ["array-like", "sparse matrix"], + "y_pred": ["array-like", "sparse matrix"], + "beta": [Interval(Real, 0.0, None, closed="both")], + "labels": ["array-like", None], + "pos_label": [Real, str, "boolean", None], + "average": [ + StrOptions({"micro", "macro", "samples", "weighted", "binary"}), + None, + ], + "warn_for": [list, tuple, set], + "sample_weight": ["array-like", None], + "zero_division": [ + Options(Real, {0.0, 1.0}), + "nan", + StrOptions({"warn"}), + ], + }, + prefer_skip_nested_validation=True, +) +def precision_recall_fscore_support( + y_true, + y_pred, + *, + beta=1.0, + labels=None, + pos_label=1, + average=None, + warn_for=("precision", "recall", "f-score"), + sample_weight=None, + zero_division="warn", +): + """Compute precision, recall, F-measure and support for each class. + + The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of + true positives and ``fp`` the number of false positives. The precision is + intuitively the ability of the classifier not to label a negative sample as + positive. + + The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of + true positives and ``fn`` the number of false negatives. The recall is + intuitively the ability of the classifier to find all the positive samples. + + The F-beta score can be interpreted as a weighted harmonic mean of + the precision and recall, where an F-beta score reaches its best + value at 1 and worst score at 0. + + The F-beta score weights recall more than precision by a factor of + ``beta``. ``beta == 1.0`` means recall and precision are equally important. + + The support is the number of occurrences of each class in ``y_true``. + + Support beyond term:`binary` targets is achieved by treating :term:`multiclass` + and :term:`multilabel` data as a collection of binary problems, one for each + label. For the :term:`binary` case, setting `average='binary'` will return + metrics for `pos_label`. If `average` is not `'binary'`, `pos_label` is ignored + and metrics for both classes are computed, then averaged or both returned (when + `average=None`). Similarly, for :term:`multiclass` and :term:`multilabel` targets, + metrics for all `labels` are either returned or averaged depending on the `average` + parameter. Use `labels` specify the set of labels to calculate metrics for. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + y_true : 1d array-like, or label indicator array / sparse matrix + Ground truth (correct) target values. + + y_pred : 1d array-like, or label indicator array / sparse matrix + Estimated targets as returned by a classifier. + + beta : float, default=1.0 + The strength of recall versus precision in the F-score. + + labels : array-like, default=None + The set of labels to include when `average != 'binary'`, and their + order if `average is None`. Labels present in the data can be + excluded, for example in multiclass classification to exclude a "negative + class". Labels not present in the data can be included and will be + "assigned" 0 samples. For multilabel targets, labels are column indices. + By default, all labels in `y_true` and `y_pred` are used in sorted order. + + pos_label : int, float, bool or str, default=1 + The class to report if `average='binary'` and the data is binary, + otherwise this parameter is ignored. 
+ For multiclass or multilabel targets, set `labels=[pos_label]` and + `average != 'binary'` to report metrics for one label only. + + average : {'binary', 'micro', 'macro', 'samples', 'weighted'}, \ + default=None + If ``None``, the metrics for each class are returned. Otherwise, this + determines the type of averaging performed on the data: + + ``'binary'``: + Only report results for the class specified by ``pos_label``. + This is applicable only if targets (``y_{true,pred}``) are binary. + ``'micro'``: + Calculate metrics globally by counting the total true positives, + false negatives and false positives. + ``'macro'``: + Calculate metrics for each label, and find their unweighted + mean. This does not take label imbalance into account. + ``'weighted'``: + Calculate metrics for each label, and find their average weighted + by support (the number of true instances for each label). This + alters 'macro' to account for label imbalance; it can result in an + F-score that is not between precision and recall. + ``'samples'``: + Calculate metrics for each instance, and find their average (only + meaningful for multilabel classification where this differs from + :func:`accuracy_score`). + + warn_for : list, tuple or set, for internal use + This determines which warnings will be made in the case that this + function is being used to return only one of its metrics. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. + + zero_division : {"warn", 0.0, 1.0, np.nan}, default="warn" + Sets the value to return when there is a zero division: + - recall: when there are no positive labels + - precision: when there are no positive predictions + - f-score: both + + Notes: + - If set to "warn", this acts like 0, but a warning is also raised. + - If set to `np.nan`, such values will be excluded from the average. + + .. versionadded:: 1.3 + `np.nan` option was added. + + Returns + ------- + precision : float (if average is not None) or array of float, shape =\ + [n_unique_labels] + Precision score. + + recall : float (if average is not None) or array of float, shape =\ + [n_unique_labels] + Recall score. + + fbeta_score : float (if average is not None) or array of float, shape =\ + [n_unique_labels] + F-beta score. + + support : None (if average is not None) or array of int, shape =\ + [n_unique_labels] + The number of occurrences of each label in ``y_true``. + + Notes + ----- + When ``true positive + false positive == 0``, precision is undefined. + When ``true positive + false negative == 0``, recall is undefined. When + ``true positive + false negative + false positive == 0``, f-score is + undefined. In such cases, by default the metric will be set to 0, and + ``UndefinedMetricWarning`` will be raised. This behavior can be modified + with ``zero_division``. + + References + ---------- + .. [1] `Wikipedia entry for the Precision and recall + `_. + + .. [2] `Wikipedia entry for the F1-score + `_. + + .. [3] `Discriminative Methods for Multi-labeled Classification Advances + in Knowledge Discovery and Data Mining (2004), pp. 22-30 by Shantanu + Godbole, Sunita Sarawagi + `_. 
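+
+ In this module, :func:`f1_score` and :func:`fbeta_score` are implemented as
+ calls to this function, keeping only the F-score element of the returned
+ tuple.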
+ + Examples + -------- + >>> import numpy as np + >>> from sklearn.metrics import precision_recall_fscore_support + >>> y_true = np.array(['cat', 'dog', 'pig', 'cat', 'dog', 'pig']) + >>> y_pred = np.array(['cat', 'pig', 'dog', 'cat', 'cat', 'dog']) + >>> precision_recall_fscore_support(y_true, y_pred, average='macro') + (0.22..., 0.33..., 0.26..., None) + >>> precision_recall_fscore_support(y_true, y_pred, average='micro') + (0.33..., 0.33..., 0.33..., None) + >>> precision_recall_fscore_support(y_true, y_pred, average='weighted') + (0.22..., 0.33..., 0.26..., None) + + It is possible to compute per-label precisions, recalls, F1-scores and + supports instead of averaging: + + >>> precision_recall_fscore_support(y_true, y_pred, average=None, + ... labels=['pig', 'dog', 'cat']) + (array([0. , 0. , 0.66...]), + array([0., 0., 1.]), array([0. , 0. , 0.8]), + array([2, 2, 2])) + """ + _check_zero_division(zero_division) + labels = _check_set_wise_labels(y_true, y_pred, average, labels, pos_label) + + # Calculate tp_sum, pred_sum, true_sum ### + samplewise = average == "samples" + MCM = multilabel_confusion_matrix( + y_true, + y_pred, + sample_weight=sample_weight, + labels=labels, + samplewise=samplewise, + ) + tp_sum = MCM[:, 1, 1] + pred_sum = tp_sum + MCM[:, 0, 1] + true_sum = tp_sum + MCM[:, 1, 0] + + if average == "micro": + tp_sum = np.array([tp_sum.sum()]) + pred_sum = np.array([pred_sum.sum()]) + true_sum = np.array([true_sum.sum()]) + + # Finally, we have all our sufficient statistics. Divide! # + beta2 = beta**2 + + # Divide, and on zero-division, set scores and/or warn according to + # zero_division: + precision = _prf_divide( + tp_sum, pred_sum, "precision", "predicted", average, warn_for, zero_division + ) + recall = _prf_divide( + tp_sum, true_sum, "recall", "true", average, warn_for, zero_division + ) + + if np.isposinf(beta): + f_score = recall + elif beta == 0: + f_score = precision + else: + # The score is defined as: + # score = (1 + beta**2) * precision * recall / (beta**2 * precision + recall) + # Therefore, we can express the score in terms of confusion matrix entries as: + # score = (1 + beta**2) * tp / ((1 + beta**2) * tp + beta**2 * fn + fp) + denom = beta2 * true_sum + pred_sum + f_score = _prf_divide( + (1 + beta2) * tp_sum, + denom, + "f-score", + "true nor predicted", + average, + warn_for, + zero_division, + ) + + # Average the results + if average == "weighted": + weights = true_sum + elif average == "samples": + weights = sample_weight + else: + weights = None + + if average is not None: + assert average != "binary" or len(precision) == 1 + precision = _nanaverage(precision, weights=weights) + recall = _nanaverage(recall, weights=weights) + f_score = _nanaverage(f_score, weights=weights) + true_sum = None # return no support + + return precision, recall, f_score, true_sum + + +@validate_params( + { + "y_true": ["array-like", "sparse matrix"], + "y_pred": ["array-like", "sparse matrix"], + "labels": ["array-like", None], + "sample_weight": ["array-like", None], + "raise_warning": ["boolean"], + }, + prefer_skip_nested_validation=True, +) +def class_likelihood_ratios( + y_true, + y_pred, + *, + labels=None, + sample_weight=None, + raise_warning=True, +): + """Compute binary classification positive and negative likelihood ratios. + + The positive likelihood ratio is `LR+ = sensitivity / (1 - specificity)` + where the sensitivity or recall is the ratio `tp / (tp + fn)` and the + specificity is `tn / (tn + fp)`. 
The negative likelihood ratio is `LR- = (1 + - sensitivity) / specificity`. Here `tp` is the number of true positives, + `fp` the number of false positives, `tn` is the number of true negatives and + `fn` the number of false negatives. Both class likelihood ratios can be used + to obtain post-test probabilities given a pre-test probability. + + `LR+` ranges from 1 to infinity. A `LR+` of 1 indicates that the probability + of predicting the positive class is the same for samples belonging to either + class; therefore, the test is useless. The greater `LR+` is, the more a + positive prediction is likely to be a true positive when compared with the + pre-test probability. A value of `LR+` lower than 1 is invalid as it would + indicate that the odds of a sample being a true positive decrease with + respect to the pre-test odds. + + `LR-` ranges from 0 to 1. The closer it is to 0, the lower the probability + of a given sample to be a false negative. A `LR-` of 1 means the test is + useless because the odds of having the condition did not change after the + test. A value of `LR-` greater than 1 invalidates the classifier as it + indicates an increase in the odds of a sample belonging to the positive + class after being classified as negative. This is the case when the + classifier systematically predicts the opposite of the true label. + + A typical application in medicine is to identify the positive/negative class + to the presence/absence of a disease, respectively; the classifier being a + diagnostic test; the pre-test probability of an individual having the + disease can be the prevalence of such disease (proportion of a particular + population found to be affected by a medical condition); and the post-test + probabilities would be the probability that the condition is truly present + given a positive test result. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + y_true : 1d array-like, or label indicator array / sparse matrix + Ground truth (correct) target values. + + y_pred : 1d array-like, or label indicator array / sparse matrix + Estimated targets as returned by a classifier. + + labels : array-like, default=None + List of labels to index the matrix. This may be used to select the + positive and negative classes with the ordering `labels=[negative_class, + positive_class]`. If `None` is given, those that appear at least once in + `y_true` or `y_pred` are used in sorted order. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. + + raise_warning : bool, default=True + Whether or not a case-specific warning message is raised when there is a + zero division. Even if the error is not raised, the function will return + nan in such cases. + + Returns + ------- + (positive_likelihood_ratio, negative_likelihood_ratio) : tuple + A tuple of two float, the first containing the Positive likelihood ratio + and the second the Negative likelihood ratio. + + Warns + ----- + When `false positive == 0`, the positive likelihood ratio is undefined. + When `true negative == 0`, the negative likelihood ratio is undefined. + When `true positive + false negative == 0` both ratios are undefined. + In such cases, `UserWarning` will be raised if raise_warning=True. + + References + ---------- + .. [1] `Wikipedia entry for the Likelihood ratios in diagnostic testing + `_. 
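+
+ Both ratios act as multipliers on the pre-test odds: after a positive
+ prediction the post-test odds of the condition are the pre-test odds times
+ `LR+`, and after a negative prediction they are the pre-test odds times
+ `LR-`. For instance, with `LR+ = 1.5` as in the first example below, a
+ pre-test probability of 0.5 (odds of 1) becomes a post-test probability of
+ 1.5 / (1 + 1.5) = 0.6.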
+ + Examples + -------- + >>> import numpy as np + >>> from sklearn.metrics import class_likelihood_ratios + >>> class_likelihood_ratios([0, 1, 0, 1, 0], [1, 1, 0, 0, 0]) + (1.5, 0.75) + >>> y_true = np.array(["non-cat", "cat", "non-cat", "cat", "non-cat"]) + >>> y_pred = np.array(["cat", "cat", "non-cat", "non-cat", "non-cat"]) + >>> class_likelihood_ratios(y_true, y_pred) + (1.33..., 0.66...) + >>> y_true = np.array(["non-zebra", "zebra", "non-zebra", "zebra", "non-zebra"]) + >>> y_pred = np.array(["zebra", "zebra", "non-zebra", "non-zebra", "non-zebra"]) + >>> class_likelihood_ratios(y_true, y_pred) + (1.5, 0.75) + + To avoid ambiguities, use the notation `labels=[negative_class, + positive_class]` + + >>> y_true = np.array(["non-cat", "cat", "non-cat", "cat", "non-cat"]) + >>> y_pred = np.array(["cat", "cat", "non-cat", "non-cat", "non-cat"]) + >>> class_likelihood_ratios(y_true, y_pred, labels=["non-cat", "cat"]) + (1.5, 0.75) + """ + + y_type, y_true, y_pred = _check_targets(y_true, y_pred) + if y_type != "binary": + raise ValueError( + "class_likelihood_ratios only supports binary classification " + f"problems, got targets of type: {y_type}" + ) + + cm = confusion_matrix( + y_true, + y_pred, + sample_weight=sample_weight, + labels=labels, + ) + + # Case when `y_test` contains a single class and `y_test == y_pred`. + # This may happen when cross-validating imbalanced data and should + # not be interpreted as a perfect score. + if cm.shape == (1, 1): + msg = "samples of only one class were seen during testing " + if raise_warning: + warnings.warn(msg, UserWarning, stacklevel=2) + positive_likelihood_ratio = np.nan + negative_likelihood_ratio = np.nan + else: + tn, fp, fn, tp = cm.ravel() + support_pos = tp + fn + support_neg = tn + fp + pos_num = tp * support_neg + pos_denom = fp * support_pos + neg_num = fn * support_neg + neg_denom = tn * support_pos + + # If zero division warn and set scores to nan, else divide + if support_pos == 0: + msg = "no samples of the positive class were present in the testing set " + if raise_warning: + warnings.warn(msg, UserWarning, stacklevel=2) + positive_likelihood_ratio = np.nan + negative_likelihood_ratio = np.nan + if fp == 0: + if tp == 0: + msg = "no samples predicted for the positive class" + else: + msg = "positive_likelihood_ratio ill-defined and being set to nan " + if raise_warning: + warnings.warn(msg, UserWarning, stacklevel=2) + positive_likelihood_ratio = np.nan + else: + positive_likelihood_ratio = pos_num / pos_denom + if tn == 0: + msg = "negative_likelihood_ratio ill-defined and being set to nan " + if raise_warning: + warnings.warn(msg, UserWarning, stacklevel=2) + negative_likelihood_ratio = np.nan + else: + negative_likelihood_ratio = neg_num / neg_denom + + return positive_likelihood_ratio, negative_likelihood_ratio + + +@validate_params( + { + "y_true": ["array-like", "sparse matrix"], + "y_pred": ["array-like", "sparse matrix"], + "labels": ["array-like", None], + "pos_label": [Real, str, "boolean", None], + "average": [ + StrOptions({"micro", "macro", "samples", "weighted", "binary"}), + None, + ], + "sample_weight": ["array-like", None], + "zero_division": [ + Options(Real, {0.0, 1.0}), + "nan", + StrOptions({"warn"}), + ], + }, + prefer_skip_nested_validation=True, +) +def precision_score( + y_true, + y_pred, + *, + labels=None, + pos_label=1, + average="binary", + sample_weight=None, + zero_division="warn", +): + """Compute the precision. 
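As a quick illustration of the definition that follows, precision can be recomputed from the confusion-matrix counts; this is a minimal sketch with made-up labels, using only public scikit-learn functions:

from sklearn.metrics import confusion_matrix, precision_score

y_true = [0, 1, 1, 0, 1]
y_pred = [1, 1, 1, 0, 0]
tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()
print(tp / (tp + fp))                    # 0.666..., precision of the positive class
print(precision_score(y_true, y_pred))   # same value (average='binary', pos_label=1)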
+ + The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of + true positives and ``fp`` the number of false positives. The precision is + intuitively the ability of the classifier not to label as positive a sample + that is negative. + + The best value is 1 and the worst value is 0. + + Support beyond term:`binary` targets is achieved by treating :term:`multiclass` + and :term:`multilabel` data as a collection of binary problems, one for each + label. For the :term:`binary` case, setting `average='binary'` will return + precision for `pos_label`. If `average` is not `'binary'`, `pos_label` is ignored + and precision for both classes are computed, then averaged or both returned (when + `average=None`). Similarly, for :term:`multiclass` and :term:`multilabel` targets, + precision for all `labels` are either returned or averaged depending on the + `average` parameter. Use `labels` specify the set of labels to calculate precision + for. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + y_true : 1d array-like, or label indicator array / sparse matrix + Ground truth (correct) target values. + + y_pred : 1d array-like, or label indicator array / sparse matrix + Estimated targets as returned by a classifier. + + labels : array-like, default=None + The set of labels to include when `average != 'binary'`, and their + order if `average is None`. Labels present in the data can be + excluded, for example in multiclass classification to exclude a "negative + class". Labels not present in the data can be included and will be + "assigned" 0 samples. For multilabel targets, labels are column indices. + By default, all labels in `y_true` and `y_pred` are used in sorted order. + + .. versionchanged:: 0.17 + Parameter `labels` improved for multiclass problem. + + pos_label : int, float, bool or str, default=1 + The class to report if `average='binary'` and the data is binary, + otherwise this parameter is ignored. + For multiclass or multilabel targets, set `labels=[pos_label]` and + `average != 'binary'` to report metrics for one label only. + + average : {'micro', 'macro', 'samples', 'weighted', 'binary'} or None, \ + default='binary' + This parameter is required for multiclass/multilabel targets. + If ``None``, the scores for each class are returned. Otherwise, this + determines the type of averaging performed on the data: + + ``'binary'``: + Only report results for the class specified by ``pos_label``. + This is applicable only if targets (``y_{true,pred}``) are binary. + ``'micro'``: + Calculate metrics globally by counting the total true positives, + false negatives and false positives. + ``'macro'``: + Calculate metrics for each label, and find their unweighted + mean. This does not take label imbalance into account. + ``'weighted'``: + Calculate metrics for each label, and find their average weighted + by support (the number of true instances for each label). This + alters 'macro' to account for label imbalance; it can result in an + F-score that is not between precision and recall. + ``'samples'``: + Calculate metrics for each instance, and find their average (only + meaningful for multilabel classification where this differs from + :func:`accuracy_score`). + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. + + zero_division : {"warn", 0.0, 1.0, np.nan}, default="warn" + Sets the value to return when there is a zero division. + + Notes: + - If set to "warn", this acts like 0, but a warning is also raised. 
+ - If set to `np.nan`, such values will be excluded from the average. + + .. versionadded:: 1.3 + `np.nan` option was added. + + Returns + ------- + precision : float (if average is not None) or array of float of shape \ + (n_unique_labels,) + Precision of the positive class in binary classification or weighted + average of the precision of each class for the multiclass task. + + See Also + -------- + precision_recall_fscore_support : Compute precision, recall, F-measure and + support for each class. + recall_score : Compute the ratio ``tp / (tp + fn)`` where ``tp`` is the + number of true positives and ``fn`` the number of false negatives. + PrecisionRecallDisplay.from_estimator : Plot precision-recall curve given + an estimator and some data. + PrecisionRecallDisplay.from_predictions : Plot precision-recall curve given + binary class predictions. + multilabel_confusion_matrix : Compute a confusion matrix for each class or + sample. + + Notes + ----- + When ``true positive + false positive == 0``, precision returns 0 and + raises ``UndefinedMetricWarning``. This behavior can be + modified with ``zero_division``. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.metrics import precision_score + >>> y_true = [0, 1, 2, 0, 1, 2] + >>> y_pred = [0, 2, 1, 0, 0, 1] + >>> precision_score(y_true, y_pred, average='macro') + 0.22... + >>> precision_score(y_true, y_pred, average='micro') + 0.33... + >>> precision_score(y_true, y_pred, average='weighted') + 0.22... + >>> precision_score(y_true, y_pred, average=None) + array([0.66..., 0. , 0. ]) + >>> y_pred = [0, 0, 0, 0, 0, 0] + >>> precision_score(y_true, y_pred, average=None) + array([0.33..., 0. , 0. ]) + >>> precision_score(y_true, y_pred, average=None, zero_division=1) + array([0.33..., 1. , 1. ]) + >>> precision_score(y_true, y_pred, average=None, zero_division=np.nan) + array([0.33..., nan, nan]) + + >>> # multilabel classification + >>> y_true = [[0, 0, 0], [1, 1, 1], [0, 1, 1]] + >>> y_pred = [[0, 0, 0], [1, 1, 1], [1, 1, 0]] + >>> precision_score(y_true, y_pred, average=None) + array([0.5, 1. , 1. ]) + """ + p, _, _, _ = precision_recall_fscore_support( + y_true, + y_pred, + labels=labels, + pos_label=pos_label, + average=average, + warn_for=("precision",), + sample_weight=sample_weight, + zero_division=zero_division, + ) + return p + + +@validate_params( + { + "y_true": ["array-like", "sparse matrix"], + "y_pred": ["array-like", "sparse matrix"], + "labels": ["array-like", None], + "pos_label": [Real, str, "boolean", None], + "average": [ + StrOptions({"micro", "macro", "samples", "weighted", "binary"}), + None, + ], + "sample_weight": ["array-like", None], + "zero_division": [ + Options(Real, {0.0, 1.0}), + "nan", + StrOptions({"warn"}), + ], + }, + prefer_skip_nested_validation=True, +) +def recall_score( + y_true, + y_pred, + *, + labels=None, + pos_label=1, + average="binary", + sample_weight=None, + zero_division="warn", +): + """Compute the recall. + + The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of + true positives and ``fn`` the number of false negatives. The recall is + intuitively the ability of the classifier to find all the positive samples. + + The best value is 1 and the worst value is 0. + + Support beyond term:`binary` targets is achieved by treating :term:`multiclass` + and :term:`multilabel` data as a collection of binary problems, one for each + label. For the :term:`binary` case, setting `average='binary'` will return + recall for `pos_label`. 
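For the binary case just described, the value reported for `pos_label` (and the link between support-weighted averaging and accuracy) can be reproduced by hand; a minimal sketch with illustrative labels:

from sklearn.metrics import accuracy_score, confusion_matrix, recall_score

y_true = [0, 1, 1, 0, 1]
y_pred = [1, 1, 1, 0, 0]
tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()
print(tp / (tp + fn))                                    # 0.666..., recall of pos_label=1
print(recall_score(y_true, y_pred))                      # same value with average='binary'
print(recall_score(y_true, y_pred, average="weighted"))  # 0.6, equal to accuracy_score(y_true, y_pred)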
If `average` is not `'binary'`, `pos_label` is ignored + and recall for both classes are computed then averaged or both returned (when + `average=None`). Similarly, for :term:`multiclass` and :term:`multilabel` targets, + recall for all `labels` are either returned or averaged depending on the `average` + parameter. Use `labels` specify the set of labels to calculate recall for. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + y_true : 1d array-like, or label indicator array / sparse matrix + Ground truth (correct) target values. + + y_pred : 1d array-like, or label indicator array / sparse matrix + Estimated targets as returned by a classifier. + + labels : array-like, default=None + The set of labels to include when `average != 'binary'`, and their + order if `average is None`. Labels present in the data can be + excluded, for example in multiclass classification to exclude a "negative + class". Labels not present in the data can be included and will be + "assigned" 0 samples. For multilabel targets, labels are column indices. + By default, all labels in `y_true` and `y_pred` are used in sorted order. + + .. versionchanged:: 0.17 + Parameter `labels` improved for multiclass problem. + + pos_label : int, float, bool or str, default=1 + The class to report if `average='binary'` and the data is binary, + otherwise this parameter is ignored. + For multiclass or multilabel targets, set `labels=[pos_label]` and + `average != 'binary'` to report metrics for one label only. + + average : {'micro', 'macro', 'samples', 'weighted', 'binary'} or None, \ + default='binary' + This parameter is required for multiclass/multilabel targets. + If ``None``, the scores for each class are returned. Otherwise, this + determines the type of averaging performed on the data: + + ``'binary'``: + Only report results for the class specified by ``pos_label``. + This is applicable only if targets (``y_{true,pred}``) are binary. + ``'micro'``: + Calculate metrics globally by counting the total true positives, + false negatives and false positives. + ``'macro'``: + Calculate metrics for each label, and find their unweighted + mean. This does not take label imbalance into account. + ``'weighted'``: + Calculate metrics for each label, and find their average weighted + by support (the number of true instances for each label). This + alters 'macro' to account for label imbalance; it can result in an + F-score that is not between precision and recall. Weighted recall + is equal to accuracy. + ``'samples'``: + Calculate metrics for each instance, and find their average (only + meaningful for multilabel classification where this differs from + :func:`accuracy_score`). + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. + + zero_division : {"warn", 0.0, 1.0, np.nan}, default="warn" + Sets the value to return when there is a zero division. + + Notes: + - If set to "warn", this acts like 0, but a warning is also raised. + - If set to `np.nan`, such values will be excluded from the average. + + .. versionadded:: 1.3 + `np.nan` option was added. + + Returns + ------- + recall : float (if average is not None) or array of float of shape \ + (n_unique_labels,) + Recall of the positive class in binary classification or weighted + average of the recall of each class for the multiclass task. + + See Also + -------- + precision_recall_fscore_support : Compute precision, recall, F-measure and + support for each class. 
+ precision_score : Compute the ratio ``tp / (tp + fp)`` where ``tp`` is the + number of true positives and ``fp`` the number of false positives. + balanced_accuracy_score : Compute balanced accuracy to deal with imbalanced + datasets. + multilabel_confusion_matrix : Compute a confusion matrix for each class or + sample. + PrecisionRecallDisplay.from_estimator : Plot precision-recall curve given + an estimator and some data. + PrecisionRecallDisplay.from_predictions : Plot precision-recall curve given + binary class predictions. + + Notes + ----- + When ``true positive + false negative == 0``, recall returns 0 and raises + ``UndefinedMetricWarning``. This behavior can be modified with + ``zero_division``. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.metrics import recall_score + >>> y_true = [0, 1, 2, 0, 1, 2] + >>> y_pred = [0, 2, 1, 0, 0, 1] + >>> recall_score(y_true, y_pred, average='macro') + 0.33... + >>> recall_score(y_true, y_pred, average='micro') + 0.33... + >>> recall_score(y_true, y_pred, average='weighted') + 0.33... + >>> recall_score(y_true, y_pred, average=None) + array([1., 0., 0.]) + >>> y_true = [0, 0, 0, 0, 0, 0] + >>> recall_score(y_true, y_pred, average=None) + array([0.5, 0. , 0. ]) + >>> recall_score(y_true, y_pred, average=None, zero_division=1) + array([0.5, 1. , 1. ]) + >>> recall_score(y_true, y_pred, average=None, zero_division=np.nan) + array([0.5, nan, nan]) + + >>> # multilabel classification + >>> y_true = [[0, 0, 0], [1, 1, 1], [0, 1, 1]] + >>> y_pred = [[0, 0, 0], [1, 1, 1], [1, 1, 0]] + >>> recall_score(y_true, y_pred, average=None) + array([1. , 1. , 0.5]) + """ + _, r, _, _ = precision_recall_fscore_support( + y_true, + y_pred, + labels=labels, + pos_label=pos_label, + average=average, + warn_for=("recall",), + sample_weight=sample_weight, + zero_division=zero_division, + ) + return r + + +@validate_params( + { + "y_true": ["array-like"], + "y_pred": ["array-like"], + "sample_weight": ["array-like", None], + "adjusted": ["boolean"], + }, + prefer_skip_nested_validation=True, +) +def balanced_accuracy_score(y_true, y_pred, *, sample_weight=None, adjusted=False): + """Compute the balanced accuracy. + + The balanced accuracy in binary and multiclass classification problems to + deal with imbalanced datasets. It is defined as the average of recall + obtained on each class. + + The best value is 1 and the worst value is 0 when ``adjusted=False``. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.20 + + Parameters + ---------- + y_true : array-like of shape (n_samples,) + Ground truth (correct) target values. + + y_pred : array-like of shape (n_samples,) + Estimated targets as returned by a classifier. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. + + adjusted : bool, default=False + When true, the result is adjusted for chance, so that random + performance would score 0, while keeping perfect performance at a score + of 1. + + Returns + ------- + balanced_accuracy : float + Balanced accuracy score. + + See Also + -------- + average_precision_score : Compute average precision (AP) from prediction + scores. + precision_score : Compute the precision score. + recall_score : Compute the recall score. + roc_auc_score : Compute Area Under the Receiver Operating Characteristic + Curve (ROC AUC) from prediction scores. + + Notes + ----- + Some literature promotes alternative definitions of balanced accuracy. 
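The definition used here, the macro-average of per-class recall, can be verified with a short sketch (illustrative data, public API only):

import numpy as np
from sklearn.metrics import balanced_accuracy_score, recall_score

y_true = [0, 1, 0, 0, 1, 0]
y_pred = [0, 1, 0, 0, 0, 1]
per_class_recall = recall_score(y_true, y_pred, average=None)  # array([0.75, 0.5])
print(np.mean(per_class_recall))                               # 0.625
print(balanced_accuracy_score(y_true, y_pred))                 # 0.625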
Our + definition is equivalent to :func:`accuracy_score` with class-balanced + sample weights, and shares desirable properties with the binary case. + See the :ref:`User Guide `. + + References + ---------- + .. [1] Brodersen, K.H.; Ong, C.S.; Stephan, K.E.; Buhmann, J.M. (2010). + The balanced accuracy and its posterior distribution. + Proceedings of the 20th International Conference on Pattern + Recognition, 3121-24. + .. [2] John. D. Kelleher, Brian Mac Namee, Aoife D'Arcy, (2015). + `Fundamentals of Machine Learning for Predictive Data Analytics: + Algorithms, Worked Examples, and Case Studies + `_. + + Examples + -------- + >>> from sklearn.metrics import balanced_accuracy_score + >>> y_true = [0, 1, 0, 0, 1, 0] + >>> y_pred = [0, 1, 0, 0, 0, 1] + >>> balanced_accuracy_score(y_true, y_pred) + 0.625 + """ + C = confusion_matrix(y_true, y_pred, sample_weight=sample_weight) + with np.errstate(divide="ignore", invalid="ignore"): + per_class = np.diag(C) / C.sum(axis=1) + if np.any(np.isnan(per_class)): + warnings.warn("y_pred contains classes not in y_true") + per_class = per_class[~np.isnan(per_class)] + score = np.mean(per_class) + if adjusted: + n_classes = len(per_class) + chance = 1 / n_classes + score -= chance + score /= 1 - chance + return score + + +@validate_params( + { + "y_true": ["array-like", "sparse matrix"], + "y_pred": ["array-like", "sparse matrix"], + "labels": ["array-like", None], + "target_names": ["array-like", None], + "sample_weight": ["array-like", None], + "digits": [Interval(Integral, 0, None, closed="left")], + "output_dict": ["boolean"], + "zero_division": [ + Options(Real, {0.0, 1.0}), + "nan", + StrOptions({"warn"}), + ], + }, + prefer_skip_nested_validation=True, +) +def classification_report( + y_true, + y_pred, + *, + labels=None, + target_names=None, + sample_weight=None, + digits=2, + output_dict=False, + zero_division="warn", +): + """Build a text report showing the main classification metrics. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + y_true : 1d array-like, or label indicator array / sparse matrix + Ground truth (correct) target values. + + y_pred : 1d array-like, or label indicator array / sparse matrix + Estimated targets as returned by a classifier. + + labels : array-like of shape (n_labels,), default=None + Optional list of label indices to include in the report. + + target_names : array-like of shape (n_labels,), default=None + Optional display names matching the labels (same order). + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. + + digits : int, default=2 + Number of digits for formatting output floating point values. + When ``output_dict`` is ``True``, this will be ignored and the + returned values will not be rounded. + + output_dict : bool, default=False + If True, return output as dict. + + .. versionadded:: 0.20 + + zero_division : {"warn", 0.0, 1.0, np.nan}, default="warn" + Sets the value to return when there is a zero division. If set to + "warn", this acts as 0, but warnings are also raised. + + .. versionadded:: 1.3 + `np.nan` option was added. + + Returns + ------- + report : str or dict + Text summary of the precision, recall, F1 score for each class. + Dictionary returned if output_dict is True. Dictionary has the + following structure:: + + {'label 1': {'precision':0.5, + 'recall':1.0, + 'f1-score':0.67, + 'support':1}, + 'label 2': { ... }, + ... 
+ } + + The reported averages include macro average (averaging the unweighted + mean per label), weighted average (averaging the support-weighted mean + per label), and sample average (only for multilabel classification). + Micro average (averaging the total true positives, false negatives and + false positives) is only shown for multi-label or multi-class + with a subset of classes, because it corresponds to accuracy + otherwise and would be the same for all metrics. + See also :func:`precision_recall_fscore_support` for more details + on averages. + + Note that in binary classification, recall of the positive class + is also known as "sensitivity"; recall of the negative class is + "specificity". + + See Also + -------- + precision_recall_fscore_support: Compute precision, recall, F-measure and + support for each class. + confusion_matrix: Compute confusion matrix to evaluate the accuracy of a + classification. + multilabel_confusion_matrix: Compute a confusion matrix for each class or sample. + + Examples + -------- + >>> from sklearn.metrics import classification_report + >>> y_true = [0, 1, 2, 2, 2] + >>> y_pred = [0, 0, 2, 2, 1] + >>> target_names = ['class 0', 'class 1', 'class 2'] + >>> print(classification_report(y_true, y_pred, target_names=target_names)) + precision recall f1-score support + + class 0 0.50 1.00 0.67 1 + class 1 0.00 0.00 0.00 1 + class 2 1.00 0.67 0.80 3 + + accuracy 0.60 5 + macro avg 0.50 0.56 0.49 5 + weighted avg 0.70 0.60 0.61 5 + + >>> y_pred = [1, 1, 0] + >>> y_true = [1, 1, 1] + >>> print(classification_report(y_true, y_pred, labels=[1, 2, 3])) + precision recall f1-score support + + 1 1.00 0.67 0.80 3 + 2 0.00 0.00 0.00 0 + 3 0.00 0.00 0.00 0 + + micro avg 1.00 0.67 0.80 3 + macro avg 0.33 0.22 0.27 3 + weighted avg 1.00 0.67 0.80 3 + + """ + + y_type, y_true, y_pred = _check_targets(y_true, y_pred) + + if labels is None: + labels = unique_labels(y_true, y_pred) + labels_given = False + else: + labels = np.asarray(labels) + labels_given = True + + # labelled micro average + micro_is_accuracy = (y_type == "multiclass" or y_type == "binary") and ( + not labels_given or (set(labels) == set(unique_labels(y_true, y_pred))) + ) + + if target_names is not None and len(labels) != len(target_names): + if labels_given: + warnings.warn( + "labels size, {0}, does not match size of target_names, {1}".format( + len(labels), len(target_names) + ) + ) + else: + raise ValueError( + "Number of classes, {0}, does not match size of " + "target_names, {1}. 
Try specifying the labels " + "parameter".format(len(labels), len(target_names)) + ) + if target_names is None: + target_names = ["%s" % l for l in labels] + + headers = ["precision", "recall", "f1-score", "support"] + # compute per-class results without averaging + p, r, f1, s = precision_recall_fscore_support( + y_true, + y_pred, + labels=labels, + average=None, + sample_weight=sample_weight, + zero_division=zero_division, + ) + rows = zip(target_names, p, r, f1, s) + + if y_type.startswith("multilabel"): + average_options = ("micro", "macro", "weighted", "samples") + else: + average_options = ("micro", "macro", "weighted") + + if output_dict: + report_dict = {label[0]: label[1:] for label in rows} + for label, scores in report_dict.items(): + report_dict[label] = dict(zip(headers, [float(i) for i in scores])) + else: + longest_last_line_heading = "weighted avg" + name_width = max(len(cn) for cn in target_names) + width = max(name_width, len(longest_last_line_heading), digits) + head_fmt = "{:>{width}s} " + " {:>9}" * len(headers) + report = head_fmt.format("", *headers, width=width) + report += "\n\n" + row_fmt = "{:>{width}s} " + " {:>9.{digits}f}" * 3 + " {:>9}\n" + for row in rows: + report += row_fmt.format(*row, width=width, digits=digits) + report += "\n" + + # compute all applicable averages + for average in average_options: + if average.startswith("micro") and micro_is_accuracy: + line_heading = "accuracy" + else: + line_heading = average + " avg" + + # compute averages with specified averaging method + avg_p, avg_r, avg_f1, _ = precision_recall_fscore_support( + y_true, + y_pred, + labels=labels, + average=average, + sample_weight=sample_weight, + zero_division=zero_division, + ) + avg = [avg_p, avg_r, avg_f1, np.sum(s)] + + if output_dict: + report_dict[line_heading] = dict(zip(headers, [float(i) for i in avg])) + else: + if line_heading == "accuracy": + row_fmt_accuracy = ( + "{:>{width}s} " + + " {:>9.{digits}}" * 2 + + " {:>9.{digits}f}" + + " {:>9}\n" + ) + report += row_fmt_accuracy.format( + line_heading, "", "", *avg[2:], width=width, digits=digits + ) + else: + report += row_fmt.format(line_heading, *avg, width=width, digits=digits) + + if output_dict: + if "accuracy" in report_dict.keys(): + report_dict["accuracy"] = report_dict["accuracy"]["precision"] + return report_dict + else: + return report + + +@validate_params( + { + "y_true": ["array-like", "sparse matrix"], + "y_pred": ["array-like", "sparse matrix"], + "sample_weight": ["array-like", None], + }, + prefer_skip_nested_validation=True, +) +def hamming_loss(y_true, y_pred, *, sample_weight=None): + """Compute the average Hamming loss. + + The Hamming loss is the fraction of labels that are incorrectly predicted. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + y_true : 1d array-like, or label indicator array / sparse matrix + Ground truth (correct) labels. + + y_pred : 1d array-like, or label indicator array / sparse matrix + Predicted labels, as returned by a classifier. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. + + .. versionadded:: 0.18 + + Returns + ------- + loss : float or int + Return the average Hamming loss between element of ``y_true`` and + ``y_pred``. + + See Also + -------- + accuracy_score : Compute the accuracy score. By default, the function will + return the fraction of correct predictions divided by the total number + of predictions. + jaccard_score : Compute the Jaccard similarity coefficient score. 
+ zero_one_loss : Compute the Zero-one classification loss. By default, the + function will return the percentage of imperfectly predicted subsets. + + Notes + ----- + In multiclass classification, the Hamming loss corresponds to the Hamming + distance between ``y_true`` and ``y_pred`` which is equivalent to the + subset ``zero_one_loss`` function, when `normalize` parameter is set to + True. + + In multilabel classification, the Hamming loss is different from the + subset zero-one loss. The zero-one loss considers the entire set of labels + for a given sample incorrect if it does not entirely match the true set of + labels. Hamming loss is more forgiving in that it penalizes only the + individual labels. + + The Hamming loss is upperbounded by the subset zero-one loss, when + `normalize` parameter is set to True. It is always between 0 and 1, + lower being better. + + References + ---------- + .. [1] Grigorios Tsoumakas, Ioannis Katakis. Multi-Label Classification: + An Overview. International Journal of Data Warehousing & Mining, + 3(3), 1-13, July-September 2007. + + .. [2] `Wikipedia entry on the Hamming distance + `_. + + Examples + -------- + >>> from sklearn.metrics import hamming_loss + >>> y_pred = [1, 2, 3, 4] + >>> y_true = [2, 2, 3, 4] + >>> hamming_loss(y_true, y_pred) + 0.25 + + In the multilabel case with binary label indicators: + + >>> import numpy as np + >>> hamming_loss(np.array([[0, 1], [1, 1]]), np.zeros((2, 2))) + 0.75 + """ + + y_type, y_true, y_pred = _check_targets(y_true, y_pred) + check_consistent_length(y_true, y_pred, sample_weight) + + if sample_weight is None: + weight_average = 1.0 + else: + weight_average = np.mean(sample_weight) + + if y_type.startswith("multilabel"): + n_differences = count_nonzero(y_true - y_pred, sample_weight=sample_weight) + return n_differences / (y_true.shape[0] * y_true.shape[1] * weight_average) + + elif y_type in ["binary", "multiclass"]: + return _weighted_sum(y_true != y_pred, sample_weight, normalize=True) + else: + raise ValueError("{0} is not supported".format(y_type)) + + +@validate_params( + { + "y_true": ["array-like"], + "y_pred": ["array-like"], + "eps": [StrOptions({"auto"}), Interval(Real, 0, 1, closed="both")], + "normalize": ["boolean"], + "sample_weight": ["array-like", None], + "labels": ["array-like", None], + }, + prefer_skip_nested_validation=True, +) +def log_loss( + y_true, y_pred, *, eps="auto", normalize=True, sample_weight=None, labels=None +): + r"""Log loss, aka logistic loss or cross-entropy loss. + + This is the loss function used in (multinomial) logistic regression + and extensions of it such as neural networks, defined as the negative + log-likelihood of a logistic model that returns ``y_pred`` probabilities + for its training data ``y_true``. + The log loss is only defined for two or more labels. + For a single sample with true label :math:`y \in \{0,1\}` and + a probability estimate :math:`p = \operatorname{Pr}(y = 1)`, the log + loss is: + + .. math:: + L_{\log}(y, p) = -(y \log (p) + (1 - y) \log (1 - p)) + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + y_true : array-like or label indicator matrix + Ground truth (correct) labels for n_samples samples. + + y_pred : array-like of float, shape = (n_samples, n_classes) or (n_samples,) + Predicted probabilities, as returned by a classifier's + predict_proba method. If ``y_pred.shape = (n_samples,)`` + the probabilities provided are assumed to be that of the + positive class. 
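For this single-column binary case, the per-sample formula given above can be reproduced with plain NumPy (a minimal sketch; the probabilities below are illustrative):

import numpy as np
from sklearn.metrics import log_loss

y = np.array([0, 1, 1])
p = np.array([0.1, 0.8, 0.7])                              # predicted P(y = 1)
print(-np.mean(y * np.log(p) + (1 - y) * np.log(1 - p)))   # ~0.2284
print(log_loss(y, p))                                      # same value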
The labels in ``y_pred`` are assumed to be + ordered alphabetically, as done by + :class:`~sklearn.preprocessing.LabelBinarizer`. + + eps : float or "auto", default="auto" + Log loss is undefined for p=0 or p=1, so probabilities are + clipped to `max(eps, min(1 - eps, p))`. The default will depend on the + data type of `y_pred` and is set to `np.finfo(y_pred.dtype).eps`. + + .. versionadded:: 1.2 + + .. versionchanged:: 1.2 + The default value changed from `1e-15` to `"auto"` that is + equivalent to `np.finfo(y_pred.dtype).eps`. + + .. deprecated:: 1.3 + `eps` is deprecated in 1.3 and will be removed in 1.5. + + normalize : bool, default=True + If true, return the mean loss per sample. + Otherwise, return the sum of the per-sample losses. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. + + labels : array-like, default=None + If not provided, labels will be inferred from y_true. If ``labels`` + is ``None`` and ``y_pred`` has shape (n_samples,) the labels are + assumed to be binary and are inferred from ``y_true``. + + .. versionadded:: 0.18 + + Returns + ------- + loss : float + Log loss, aka logistic loss or cross-entropy loss. + + Notes + ----- + The logarithm used is the natural logarithm (base-e). + + References + ---------- + C.M. Bishop (2006). Pattern Recognition and Machine Learning. Springer, + p. 209. + + Examples + -------- + >>> from sklearn.metrics import log_loss + >>> log_loss(["spam", "ham", "ham", "spam"], + ... [[.1, .9], [.9, .1], [.8, .2], [.35, .65]]) + 0.21616... + """ + y_pred = check_array( + y_pred, ensure_2d=False, dtype=[np.float64, np.float32, np.float16] + ) + if eps == "auto": + eps = np.finfo(y_pred.dtype).eps + else: + # TODO: Remove user defined eps in 1.5 + warnings.warn( + ( + "Setting the eps parameter is deprecated and will " + "be removed in 1.5. Instead eps will always have" + "a default value of `np.finfo(y_pred.dtype).eps`." + ), + FutureWarning, + ) + + check_consistent_length(y_pred, y_true, sample_weight) + lb = LabelBinarizer() + + if labels is not None: + lb.fit(labels) + else: + lb.fit(y_true) + + if len(lb.classes_) == 1: + if labels is None: + raise ValueError( + "y_true contains only one label ({0}). Please " + "provide the true labels explicitly through the " + "labels argument.".format(lb.classes_[0]) + ) + else: + raise ValueError( + "The labels array needs to contain at least two " + "labels for log_loss, " + "got {0}.".format(lb.classes_) + ) + + transformed_labels = lb.transform(y_true) + + if transformed_labels.shape[1] == 1: + transformed_labels = np.append( + 1 - transformed_labels, transformed_labels, axis=1 + ) + + # Clipping + y_pred = np.clip(y_pred, eps, 1 - eps) + + # If y_pred is of single dimension, assume y_true to be binary + # and then check. + if y_pred.ndim == 1: + y_pred = y_pred[:, np.newaxis] + if y_pred.shape[1] == 1: + y_pred = np.append(1 - y_pred, y_pred, axis=1) + + # Check if dimensions are consistent. + transformed_labels = check_array(transformed_labels) + if len(lb.classes_) != y_pred.shape[1]: + if labels is None: + raise ValueError( + "y_true and y_pred contain different number of " + "classes {0}, {1}. Please provide the true " + "labels explicitly through the labels argument. " + "Classes found in " + "y_true: {2}".format( + transformed_labels.shape[1], y_pred.shape[1], lb.classes_ + ) + ) + else: + raise ValueError( + "The number of classes in labels is different " + "from that in y_pred. 
Classes found in " + "labels: {0}".format(lb.classes_) + ) + + # Renormalize + y_pred_sum = y_pred.sum(axis=1) + if not np.isclose(y_pred_sum, 1, rtol=1e-15, atol=5 * eps).all(): + warnings.warn( + ( + "The y_pred values do not sum to one. Starting from 1.5 this" + "will result in an error." + ), + UserWarning, + ) + y_pred = y_pred / y_pred_sum[:, np.newaxis] + loss = -xlogy(transformed_labels, y_pred).sum(axis=1) + + return _weighted_sum(loss, sample_weight, normalize) + + +@validate_params( + { + "y_true": ["array-like"], + "pred_decision": ["array-like"], + "labels": ["array-like", None], + "sample_weight": ["array-like", None], + }, + prefer_skip_nested_validation=True, +) +def hinge_loss(y_true, pred_decision, *, labels=None, sample_weight=None): + """Average hinge loss (non-regularized). + + In binary class case, assuming labels in y_true are encoded with +1 and -1, + when a prediction mistake is made, ``margin = y_true * pred_decision`` is + always negative (since the signs disagree), implying ``1 - margin`` is + always greater than 1. The cumulated hinge loss is therefore an upper + bound of the number of mistakes made by the classifier. + + In multiclass case, the function expects that either all the labels are + included in y_true or an optional labels argument is provided which + contains all the labels. The multilabel margin is calculated according + to Crammer-Singer's method. As in the binary case, the cumulated hinge loss + is an upper bound of the number of mistakes made by the classifier. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + y_true : array-like of shape (n_samples,) + True target, consisting of integers of two values. The positive label + must be greater than the negative label. + + pred_decision : array-like of shape (n_samples,) or (n_samples, n_classes) + Predicted decisions, as output by decision_function (floats). + + labels : array-like, default=None + Contains all the labels for the problem. Used in multiclass hinge loss. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. + + Returns + ------- + loss : float + Average hinge loss. + + References + ---------- + .. [1] `Wikipedia entry on the Hinge loss + `_. + + .. [2] Koby Crammer, Yoram Singer. On the Algorithmic + Implementation of Multiclass Kernel-based Vector + Machines. Journal of Machine Learning Research 2, + (2001), 265-292. + + .. [3] `L1 AND L2 Regularization for Multiclass Hinge Loss Models + by Robert C. Moore, John DeNero + `_. + + Examples + -------- + >>> from sklearn import svm + >>> from sklearn.metrics import hinge_loss + >>> X = [[0], [1]] + >>> y = [-1, 1] + >>> est = svm.LinearSVC(dual="auto", random_state=0) + >>> est.fit(X, y) + LinearSVC(dual='auto', random_state=0) + >>> pred_decision = est.decision_function([[-2], [3], [0.5]]) + >>> pred_decision + array([-2.18..., 2.36..., 0.09...]) + >>> hinge_loss([-1, 1, 1], pred_decision) + 0.30... + + In the multiclass case: + + >>> import numpy as np + >>> X = np.array([[0], [1], [2], [3]]) + >>> Y = np.array([0, 1, 2, 3]) + >>> labels = np.array([0, 1, 2, 3]) + >>> est = svm.LinearSVC(dual="auto") + >>> est.fit(X, Y) + LinearSVC(dual='auto') + >>> pred_decision = est.decision_function([[-1], [2], [3]]) + >>> y_true = [0, 2, 3] + >>> hinge_loss(y_true, pred_decision, labels=labels) + 0.56... 
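The binary value above can also be reproduced directly from the definition, mean(max(0, 1 - y * decision)); the sketch below uses the approximate decision values printed in the binary example:

import numpy as np

y = np.array([-1, 1, 1])
decision = np.array([-2.18, 2.36, 0.09])         # approximate decision_function outputs
print(np.mean(np.maximum(0, 1 - y * decision)))  # ~0.30, matching hinge_loss above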
+ """ + check_consistent_length(y_true, pred_decision, sample_weight) + pred_decision = check_array(pred_decision, ensure_2d=False) + y_true = column_or_1d(y_true) + y_true_unique = np.unique(labels if labels is not None else y_true) + + if y_true_unique.size > 2: + if pred_decision.ndim <= 1: + raise ValueError( + "The shape of pred_decision cannot be 1d array" + "with a multiclass target. pred_decision shape " + "must be (n_samples, n_classes), that is " + f"({y_true.shape[0]}, {y_true_unique.size})." + f" Got: {pred_decision.shape}" + ) + + # pred_decision.ndim > 1 is true + if y_true_unique.size != pred_decision.shape[1]: + if labels is None: + raise ValueError( + "Please include all labels in y_true " + "or pass labels as third argument" + ) + else: + raise ValueError( + "The shape of pred_decision is not " + "consistent with the number of classes. " + "With a multiclass target, pred_decision " + "shape must be " + "(n_samples, n_classes), that is " + f"({y_true.shape[0]}, {y_true_unique.size}). " + f"Got: {pred_decision.shape}" + ) + if labels is None: + labels = y_true_unique + le = LabelEncoder() + le.fit(labels) + y_true = le.transform(y_true) + mask = np.ones_like(pred_decision, dtype=bool) + mask[np.arange(y_true.shape[0]), y_true] = False + margin = pred_decision[~mask] + margin -= np.max(pred_decision[mask].reshape(y_true.shape[0], -1), axis=1) + + else: + # Handles binary class case + # this code assumes that positive and negative labels + # are encoded as +1 and -1 respectively + pred_decision = column_or_1d(pred_decision) + pred_decision = np.ravel(pred_decision) + + lbin = LabelBinarizer(neg_label=-1) + y_true = lbin.fit_transform(y_true)[:, 0] + + try: + margin = y_true * pred_decision + except TypeError: + raise TypeError("pred_decision should be an array of floats.") + + losses = 1 - margin + # The hinge_loss doesn't penalize good enough predictions. + np.clip(losses, 0, None, out=losses) + return np.average(losses, weights=sample_weight) + + +@validate_params( + { + "y_true": ["array-like"], + "y_prob": ["array-like"], + "sample_weight": ["array-like", None], + "pos_label": [Real, str, "boolean", None], + }, + prefer_skip_nested_validation=True, +) +def brier_score_loss(y_true, y_prob, *, sample_weight=None, pos_label=None): + """Compute the Brier score loss. + + The smaller the Brier score loss, the better, hence the naming with "loss". + The Brier score measures the mean squared difference between the predicted + probability and the actual outcome. The Brier score always + takes on a value between zero and one, since this is the largest + possible difference between a predicted probability (which must be + between zero and one) and the actual outcome (which can take on values + of only 0 and 1). It can be decomposed as the sum of refinement loss and + calibration loss. + + The Brier score is appropriate for binary and categorical outcomes that + can be structured as true or false, but is inappropriate for ordinal + variables which can take on three or more values (this is because the + Brier score assumes that all possible outcomes are equivalently + "distant" from one another). Which label is considered to be the positive + label is controlled via the parameter `pos_label`, which defaults to + the greater label unless `y_true` is all 0 or all -1, in which case + `pos_label` defaults to 1. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + y_true : array-like of shape (n_samples,) + True targets. 
+ + y_prob : array-like of shape (n_samples,) + Probabilities of the positive class. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. + + pos_label : int, float, bool or str, default=None + Label of the positive class. `pos_label` will be inferred in the + following manner: + + * if `y_true` in {-1, 1} or {0, 1}, `pos_label` defaults to 1; + * else if `y_true` contains string, an error will be raised and + `pos_label` should be explicitly specified; + * otherwise, `pos_label` defaults to the greater label, + i.e. `np.unique(y_true)[-1]`. + + Returns + ------- + score : float + Brier score loss. + + References + ---------- + .. [1] `Wikipedia entry for the Brier score + `_. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.metrics import brier_score_loss + >>> y_true = np.array([0, 1, 1, 0]) + >>> y_true_categorical = np.array(["spam", "ham", "ham", "spam"]) + >>> y_prob = np.array([0.1, 0.9, 0.8, 0.3]) + >>> brier_score_loss(y_true, y_prob) + 0.037... + >>> brier_score_loss(y_true, 1-y_prob, pos_label=0) + 0.037... + >>> brier_score_loss(y_true_categorical, y_prob, pos_label="ham") + 0.037... + >>> brier_score_loss(y_true, np.array(y_prob) > 0.5) + 0.0 + """ + y_true = column_or_1d(y_true) + y_prob = column_or_1d(y_prob) + assert_all_finite(y_true) + assert_all_finite(y_prob) + check_consistent_length(y_true, y_prob, sample_weight) + + y_type = type_of_target(y_true, input_name="y_true") + if y_type != "binary": + raise ValueError( + "Only binary classification is supported. The type of the target " + f"is {y_type}." + ) + + if y_prob.max() > 1: + raise ValueError("y_prob contains values greater than 1.") + if y_prob.min() < 0: + raise ValueError("y_prob contains values less than 0.") + + try: + pos_label = _check_pos_label_consistency(pos_label, y_true) + except ValueError: + classes = np.unique(y_true) + if classes.dtype.kind not in ("O", "U", "S"): + # for backward compatibility, if classes are not string then + # `pos_label` will correspond to the greater label + pos_label = classes[-1] + else: + raise + y_true = np.array(y_true == pos_label, int) + return np.average((y_true - y_prob) ** 2, weights=sample_weight) diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/_dist_metrics.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/sklearn/metrics/_dist_metrics.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..b46d049cf9b32d939c77dffba6ed71f7b2192d49 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/metrics/_dist_metrics.cpython-310-x86_64-linux-gnu.so differ diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/_dist_metrics.pxd b/venv/lib/python3.10/site-packages/sklearn/metrics/_dist_metrics.pxd new file mode 100644 index 0000000000000000000000000000000000000000..4ca16e937f7fe5a8073135ad2c45b52719de3173 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/metrics/_dist_metrics.pxd @@ -0,0 +1,272 @@ +# WARNING: Do not edit this file directly. +# It is automatically generated from 'sklearn/metrics/_dist_metrics.pxd.tp'. +# Changes must be made there. + +from libc.math cimport sqrt, exp + +from ..utils._typedefs cimport float64_t, float32_t, int32_t, intp_t + +cdef class DistanceMetric: + pass + +###################################################################### +# Inline distance functions +# +# We use these for the default (euclidean) case so that they can be +# inlined. 
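# As the helper functions below make explicit, "rdist" denotes the reduced
# distance: for the euclidean case it is the squared distance, so
# dist = sqrt(rdist) and rdist = dist ** 2. Because the square root is
# monotonic, rdist preserves the ordering of distances and lets the inner
# loops skip the sqrt until a true distance is actually needed.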
This leads to faster computation for the most common case +cdef inline float64_t euclidean_dist64( + const float64_t* x1, + const float64_t* x2, + intp_t size, +) except -1 nogil: + cdef float64_t tmp, d=0 + cdef intp_t j + for j in range(size): + tmp = (x1[j] - x2[j]) + d += tmp * tmp + return sqrt(d) + + +cdef inline float64_t euclidean_rdist64( + const float64_t* x1, + const float64_t* x2, + intp_t size, +) except -1 nogil: + cdef float64_t tmp, d=0 + cdef intp_t j + for j in range(size): + tmp = (x1[j] - x2[j]) + d += tmp * tmp + return d + + +cdef inline float64_t euclidean_dist_to_rdist64(const float64_t dist) except -1 nogil: + return dist * dist + + +cdef inline float64_t euclidean_rdist_to_dist64(const float64_t dist) except -1 nogil: + return sqrt(dist) + + +###################################################################### +# DistanceMetric64 base class +cdef class DistanceMetric64(DistanceMetric): + # The following attributes are required for a few of the subclasses. + # we must define them here so that cython's limited polymorphism will work. + # Because we don't expect to instantiate a lot of these objects, the + # extra memory overhead of this setup should not be an issue. + cdef float64_t p + cdef const float64_t[::1] vec + cdef const float64_t[:, ::1] mat + cdef intp_t size + cdef object func + cdef object kwargs + + cdef float64_t dist( + self, + const float64_t* x1, + const float64_t* x2, + intp_t size, + ) except -1 nogil + + cdef float64_t rdist( + self, + const float64_t* x1, + const float64_t* x2, + intp_t size, + ) except -1 nogil + + cdef float64_t dist_csr( + self, + const float64_t* x1_data, + const int32_t* x1_indices, + const float64_t* x2_data, + const int32_t* x2_indices, + const int32_t x1_start, + const int32_t x1_end, + const int32_t x2_start, + const int32_t x2_end, + const intp_t size, + ) except -1 nogil + + cdef float64_t rdist_csr( + self, + const float64_t* x1_data, + const int32_t* x1_indices, + const float64_t* x2_data, + const int32_t* x2_indices, + const int32_t x1_start, + const int32_t x1_end, + const int32_t x2_start, + const int32_t x2_end, + const intp_t size, + ) except -1 nogil + + cdef int pdist( + self, + const float64_t[:, ::1] X, + float64_t[:, ::1] D, + ) except -1 + + cdef int cdist( + self, + const float64_t[:, ::1] X, + const float64_t[:, ::1] Y, + float64_t[:, ::1] D, + ) except -1 + + cdef int pdist_csr( + self, + const float64_t* x1_data, + const int32_t[::1] x1_indices, + const int32_t[::1] x1_indptr, + const intp_t size, + float64_t[:, ::1] D, + ) except -1 nogil + + cdef int cdist_csr( + self, + const float64_t* x1_data, + const int32_t[::1] x1_indices, + const int32_t[::1] x1_indptr, + const float64_t* x2_data, + const int32_t[::1] x2_indices, + const int32_t[::1] x2_indptr, + const intp_t size, + float64_t[:, ::1] D, + ) except -1 nogil + + cdef float64_t _rdist_to_dist(self, float64_t rdist) except -1 nogil + + cdef float64_t _dist_to_rdist(self, float64_t dist) except -1 nogil + +###################################################################### +# Inline distance functions +# +# We use these for the default (euclidean) case so that they can be +# inlined. 
This leads to faster computation for the most common case +cdef inline float64_t euclidean_dist32( + const float32_t* x1, + const float32_t* x2, + intp_t size, +) except -1 nogil: + cdef float64_t tmp, d=0 + cdef intp_t j + for j in range(size): + tmp = (x1[j] - x2[j]) + d += tmp * tmp + return sqrt(d) + + +cdef inline float64_t euclidean_rdist32( + const float32_t* x1, + const float32_t* x2, + intp_t size, +) except -1 nogil: + cdef float64_t tmp, d=0 + cdef intp_t j + for j in range(size): + tmp = (x1[j] - x2[j]) + d += tmp * tmp + return d + + +cdef inline float64_t euclidean_dist_to_rdist32(const float32_t dist) except -1 nogil: + return dist * dist + + +cdef inline float64_t euclidean_rdist_to_dist32(const float32_t dist) except -1 nogil: + return sqrt(dist) + + +###################################################################### +# DistanceMetric32 base class +cdef class DistanceMetric32(DistanceMetric): + # The following attributes are required for a few of the subclasses. + # we must define them here so that cython's limited polymorphism will work. + # Because we don't expect to instantiate a lot of these objects, the + # extra memory overhead of this setup should not be an issue. + cdef float64_t p + cdef const float64_t[::1] vec + cdef const float64_t[:, ::1] mat + cdef intp_t size + cdef object func + cdef object kwargs + + cdef float32_t dist( + self, + const float32_t* x1, + const float32_t* x2, + intp_t size, + ) except -1 nogil + + cdef float32_t rdist( + self, + const float32_t* x1, + const float32_t* x2, + intp_t size, + ) except -1 nogil + + cdef float32_t dist_csr( + self, + const float32_t* x1_data, + const int32_t* x1_indices, + const float32_t* x2_data, + const int32_t* x2_indices, + const int32_t x1_start, + const int32_t x1_end, + const int32_t x2_start, + const int32_t x2_end, + const intp_t size, + ) except -1 nogil + + cdef float32_t rdist_csr( + self, + const float32_t* x1_data, + const int32_t* x1_indices, + const float32_t* x2_data, + const int32_t* x2_indices, + const int32_t x1_start, + const int32_t x1_end, + const int32_t x2_start, + const int32_t x2_end, + const intp_t size, + ) except -1 nogil + + cdef int pdist( + self, + const float32_t[:, ::1] X, + float32_t[:, ::1] D, + ) except -1 + + cdef int cdist( + self, + const float32_t[:, ::1] X, + const float32_t[:, ::1] Y, + float32_t[:, ::1] D, + ) except -1 + + cdef int pdist_csr( + self, + const float32_t* x1_data, + const int32_t[::1] x1_indices, + const int32_t[::1] x1_indptr, + const intp_t size, + float32_t[:, ::1] D, + ) except -1 nogil + + cdef int cdist_csr( + self, + const float32_t* x1_data, + const int32_t[::1] x1_indices, + const int32_t[::1] x1_indptr, + const float32_t* x2_data, + const int32_t[::1] x2_indices, + const int32_t[::1] x2_indptr, + const intp_t size, + float32_t[:, ::1] D, + ) except -1 nogil + + cdef float32_t _rdist_to_dist(self, float32_t rdist) except -1 nogil + + cdef float32_t _dist_to_rdist(self, float32_t dist) except -1 nogil diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/_pairwise_distances_reduction/__init__.py b/venv/lib/python3.10/site-packages/sklearn/metrics/_pairwise_distances_reduction/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..73d291995c31bcb5b54f35e342777ad34a207332 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/metrics/_pairwise_distances_reduction/__init__.py @@ -0,0 +1,112 @@ +# +# Pairwise Distances Reductions +# ============================= +# +# Authors: The scikit-learn developers. 
+# License: BSD 3 clause +# +# Overview +# -------- +# +# This module provides routines to compute pairwise distances between a set +# of row vectors of X and another set of row vectors of Y and apply a +# reduction on top. The canonical example is the brute-force computation +# of the top k nearest neighbors by leveraging the arg-k-min reduction. +# +# The reduction takes a matrix of pairwise distances between rows of X and Y +# as input and outputs an aggregate data-structure for each row of X. The +# aggregate values are typically smaller than the number of rows in Y, hence +# the term reduction. +# +# For computational reasons, the reduction are performed on the fly on chunks +# of rows of X and Y so as to keep intermediate data-structures in CPU cache +# and avoid unnecessary round trips of large distance arrays with the RAM +# that would otherwise severely degrade the speed by making the overall +# processing memory-bound. +# +# Finally, the routines follow a generic parallelization template to process +# chunks of data with OpenMP loops (via Cython prange), either on rows of X +# or rows of Y depending on their respective sizes. +# +# +# Dispatching to specialized implementations +# ------------------------------------------ +# +# Dispatchers are meant to be used in the Python code. Under the hood, a +# dispatcher must only define the logic to choose at runtime to the correct +# dtype-specialized :class:`BaseDistancesReductionDispatcher` implementation based +# on the dtype of X and of Y. +# +# +# High-level diagram +# ------------------ +# +# Legend: +# +# A ---⊳ B: A inherits from B +# A ---x B: A dispatches to B +# +# +# (base dispatcher) +# BaseDistancesReductionDispatcher +# ∆ +# | +# | +# +------------------+---------------+---------------+------------------+ +# | | | | +# | (dispatcher) (dispatcher) | +# | ArgKmin RadiusNeighbors | +# | | | | +# | | | | +# | | (float{32,64} implem.) | | +# | | BaseDistancesReduction{32,64} | | +# | | ∆ | | +# (dispatcher) | | | (dispatcher) +# ArgKminClassMode | | | RadiusNeighborsClassMode +# | | +----------+----------+ | | +# | | | | | | +# | | | | | | +# | x | | x | +# | +-------⊳ ArgKmin{32,64} RadiusNeighbors{32,64} ⊲---+ | +# x | | ∆ ∆ | | x +# ArgKminClassMode{32,64} | | | | RadiusNeighborsClassMode{32,64} +# ===================================== Specializations ============================================ +# | | | | +# | | | | +# x | | x +# EuclideanArgKmin{32,64} EuclideanRadiusNeighbors{32,64} +# +# +# For instance :class:`ArgKmin` dispatches to: +# - :class:`ArgKmin64` if X and Y are two `float64` array-likes +# - :class:`ArgKmin32` if X and Y are two `float32` array-likes +# +# In addition, if the metric parameter is set to "euclidean" or "sqeuclidean", +# then some direct subclass of `BaseDistancesReduction{32,64}` further dispatches +# to one of their subclass for euclidean-specialized implementation. For instance, +# :class:`ArgKmin64` dispatches to :class:`EuclideanArgKmin64`. +# +# Those Euclidean-specialized implementations relies on optimal implementations of +# a decomposition of the squared euclidean distance matrix into a sum of three terms +# (see :class:`MiddleTermComputer{32,64}`). 
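# A rough usage sketch of the dispatching layer described above. This is an
# internal API shown only for orientation; the exact signature lives in
# `_dispatcher.py` and may differ:
#
#   from sklearn.metrics._pairwise_distances_reduction import ArgKmin
#   dist, ind = ArgKmin.compute(X, Y, k=10, metric="euclidean",
#                               return_distance=True)
#
# With float64 inputs this resolves to ArgKmin64 (and, for the "euclidean"
# metric, to EuclideanArgKmin64); with float32 inputs, to ArgKmin32.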
+# + +from ._dispatcher import ( + ArgKmin, + ArgKminClassMode, + BaseDistancesReductionDispatcher, + RadiusNeighbors, + RadiusNeighborsClassMode, + sqeuclidean_row_norms, +) + +__all__ = [ + "BaseDistancesReductionDispatcher", + "ArgKmin", + "RadiusNeighbors", + "ArgKminClassMode", + "RadiusNeighborsClassMode", + "sqeuclidean_row_norms", +] + +# ruff: noqa: E501 diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/_pairwise_distances_reduction/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/metrics/_pairwise_distances_reduction/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..58f8149ef25088726e913f4c4138665e1ad28c16 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/metrics/_pairwise_distances_reduction/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/_pairwise_distances_reduction/__pycache__/_dispatcher.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/metrics/_pairwise_distances_reduction/__pycache__/_dispatcher.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..27173fcc8236ca1f3cbd63393791b51e208e9863 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/metrics/_pairwise_distances_reduction/__pycache__/_dispatcher.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/_pairwise_distances_reduction/_argkmin.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/sklearn/metrics/_pairwise_distances_reduction/_argkmin.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..d1020d0a2011dceef85ce9e16fd3d9969bcec927 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/metrics/_pairwise_distances_reduction/_argkmin.cpython-310-x86_64-linux-gnu.so differ diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/_pairwise_distances_reduction/_argkmin.pxd b/venv/lib/python3.10/site-packages/sklearn/metrics/_pairwise_distances_reduction/_argkmin.pxd new file mode 100644 index 0000000000000000000000000000000000000000..0bcddddab602de10b822c527a572347fc42e9b8e --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/metrics/_pairwise_distances_reduction/_argkmin.pxd @@ -0,0 +1,57 @@ +# WARNING: Do not edit this file directly. +# It is automatically generated from 'sklearn/metrics/_pairwise_distances_reduction/_argkmin.pxd.tp'. +# Changes must be made there. + +from ...utils._typedefs cimport intp_t, float64_t + +from ._base cimport BaseDistancesReduction64 +from ._middle_term_computer cimport MiddleTermComputer64 + +cdef class ArgKmin64(BaseDistancesReduction64): + """float64 implementation of the ArgKmin.""" + + cdef: + intp_t k + + intp_t[:, ::1] argkmin_indices + float64_t[:, ::1] argkmin_distances + + # Used as array of pointers to private datastructures used in threads. 
+ float64_t ** heaps_r_distances_chunks + intp_t ** heaps_indices_chunks + + +cdef class EuclideanArgKmin64(ArgKmin64): + """EuclideanDistance-specialisation of ArgKmin64.""" + cdef: + MiddleTermComputer64 middle_term_computer + const float64_t[::1] X_norm_squared + const float64_t[::1] Y_norm_squared + + bint use_squared_distances + +from ._base cimport BaseDistancesReduction32 +from ._middle_term_computer cimport MiddleTermComputer32 + +cdef class ArgKmin32(BaseDistancesReduction32): + """float32 implementation of the ArgKmin.""" + + cdef: + intp_t k + + intp_t[:, ::1] argkmin_indices + float64_t[:, ::1] argkmin_distances + + # Used as array of pointers to private datastructures used in threads. + float64_t ** heaps_r_distances_chunks + intp_t ** heaps_indices_chunks + + +cdef class EuclideanArgKmin32(ArgKmin32): + """EuclideanDistance-specialisation of ArgKmin32.""" + cdef: + MiddleTermComputer32 middle_term_computer + const float64_t[::1] X_norm_squared + const float64_t[::1] Y_norm_squared + + bint use_squared_distances diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/_pairwise_distances_reduction/_argkmin_classmode.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/sklearn/metrics/_pairwise_distances_reduction/_argkmin_classmode.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..20f386bd5e769dd977d74796d1af19832be942fe Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/metrics/_pairwise_distances_reduction/_argkmin_classmode.cpython-310-x86_64-linux-gnu.so differ diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/_pairwise_distances_reduction/_base.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/sklearn/metrics/_pairwise_distances_reduction/_base.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..f3de7b0807f9490ea1c44bf936fa811aa2868906 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/metrics/_pairwise_distances_reduction/_base.cpython-310-x86_64-linux-gnu.so differ diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/_pairwise_distances_reduction/_base.pxd b/venv/lib/python3.10/site-packages/sklearn/metrics/_pairwise_distances_reduction/_base.pxd new file mode 100644 index 0000000000000000000000000000000000000000..b5fa06227d08e3ea290195f755099796b3a7e403 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/metrics/_pairwise_distances_reduction/_base.pxd @@ -0,0 +1,265 @@ +# WARNING: Do not edit this file directly. +# It is automatically generated from 'sklearn/metrics/_pairwise_distances_reduction/_base.pxd.tp'. +# Changes must be made there. + +from cython cimport final + +from ...utils._typedefs cimport intp_t, float64_t + +from ._datasets_pair cimport DatasetsPair64 + + +cpdef float64_t[::1] _sqeuclidean_row_norms64( + X, + intp_t num_threads, +) + +cdef class BaseDistancesReduction64: + """ + Base float64 implementation template of the pairwise-distances + reduction backends. + + Implementations inherit from this template and may override the several + defined hooks as needed in order to easily extend functionality with + minimal redundant code. + """ + + cdef: + readonly DatasetsPair64 datasets_pair + + # The number of threads that can be used is stored in effective_n_threads. + # + # The number of threads to use in the parallelization strategy + # (i.e. 
parallel_on_X or parallel_on_Y) can be smaller than effective_n_threads: + # for small datasets, fewer threads might be needed to loop over pair of chunks. + # + # Hence, the number of threads that _will_ be used for looping over chunks + # is stored in chunks_n_threads, allowing solely using what we need. + # + # Thus, an invariant is: + # + # chunks_n_threads <= effective_n_threads + # + intp_t effective_n_threads + intp_t chunks_n_threads + + intp_t n_samples_chunk, chunk_size + + intp_t n_samples_X, X_n_samples_chunk, X_n_chunks, X_n_samples_last_chunk + intp_t n_samples_Y, Y_n_samples_chunk, Y_n_chunks, Y_n_samples_last_chunk + + bint execute_in_parallel_on_Y + + @final + cdef void _parallel_on_X(self) noexcept nogil + + @final + cdef void _parallel_on_Y(self) noexcept nogil + + # Placeholder methods which have to be implemented + + cdef void _compute_and_reduce_distances_on_chunks( + self, + intp_t X_start, + intp_t X_end, + intp_t Y_start, + intp_t Y_end, + intp_t thread_num, + ) noexcept nogil + + + # Placeholder methods which can be implemented + + cdef void compute_exact_distances(self) noexcept nogil + + cdef void _parallel_on_X_parallel_init( + self, + intp_t thread_num, + ) noexcept nogil + + cdef void _parallel_on_X_init_chunk( + self, + intp_t thread_num, + intp_t X_start, + intp_t X_end, + ) noexcept nogil + + cdef void _parallel_on_X_pre_compute_and_reduce_distances_on_chunks( + self, + intp_t X_start, + intp_t X_end, + intp_t Y_start, + intp_t Y_end, + intp_t thread_num, + ) noexcept nogil + + cdef void _parallel_on_X_prange_iter_finalize( + self, + intp_t thread_num, + intp_t X_start, + intp_t X_end, + ) noexcept nogil + + cdef void _parallel_on_X_parallel_finalize( + self, + intp_t thread_num + ) noexcept nogil + + cdef void _parallel_on_Y_init( + self, + ) noexcept nogil + + cdef void _parallel_on_Y_parallel_init( + self, + intp_t thread_num, + intp_t X_start, + intp_t X_end, + ) noexcept nogil + + cdef void _parallel_on_Y_pre_compute_and_reduce_distances_on_chunks( + self, + intp_t X_start, + intp_t X_end, + intp_t Y_start, + intp_t Y_end, + intp_t thread_num, + ) noexcept nogil + + cdef void _parallel_on_Y_synchronize( + self, + intp_t X_start, + intp_t X_end, + ) noexcept nogil + + cdef void _parallel_on_Y_finalize( + self, + ) noexcept nogil + +from ._datasets_pair cimport DatasetsPair32 + + +cpdef float64_t[::1] _sqeuclidean_row_norms32( + X, + intp_t num_threads, +) + +cdef class BaseDistancesReduction32: + """ + Base float32 implementation template of the pairwise-distances + reduction backends. + + Implementations inherit from this template and may override the several + defined hooks as needed in order to easily extend functionality with + minimal redundant code. + """ + + cdef: + readonly DatasetsPair32 datasets_pair + + # The number of threads that can be used is stored in effective_n_threads. + # + # The number of threads to use in the parallelization strategy + # (i.e. parallel_on_X or parallel_on_Y) can be smaller than effective_n_threads: + # for small datasets, fewer threads might be needed to loop over pair of chunks. + # + # Hence, the number of threads that _will_ be used for looping over chunks + # is stored in chunks_n_threads, allowing solely using what we need. 
+ # + # Thus, an invariant is: + # + # chunks_n_threads <= effective_n_threads + # + intp_t effective_n_threads + intp_t chunks_n_threads + + intp_t n_samples_chunk, chunk_size + + intp_t n_samples_X, X_n_samples_chunk, X_n_chunks, X_n_samples_last_chunk + intp_t n_samples_Y, Y_n_samples_chunk, Y_n_chunks, Y_n_samples_last_chunk + + bint execute_in_parallel_on_Y + + @final + cdef void _parallel_on_X(self) noexcept nogil + + @final + cdef void _parallel_on_Y(self) noexcept nogil + + # Placeholder methods which have to be implemented + + cdef void _compute_and_reduce_distances_on_chunks( + self, + intp_t X_start, + intp_t X_end, + intp_t Y_start, + intp_t Y_end, + intp_t thread_num, + ) noexcept nogil + + + # Placeholder methods which can be implemented + + cdef void compute_exact_distances(self) noexcept nogil + + cdef void _parallel_on_X_parallel_init( + self, + intp_t thread_num, + ) noexcept nogil + + cdef void _parallel_on_X_init_chunk( + self, + intp_t thread_num, + intp_t X_start, + intp_t X_end, + ) noexcept nogil + + cdef void _parallel_on_X_pre_compute_and_reduce_distances_on_chunks( + self, + intp_t X_start, + intp_t X_end, + intp_t Y_start, + intp_t Y_end, + intp_t thread_num, + ) noexcept nogil + + cdef void _parallel_on_X_prange_iter_finalize( + self, + intp_t thread_num, + intp_t X_start, + intp_t X_end, + ) noexcept nogil + + cdef void _parallel_on_X_parallel_finalize( + self, + intp_t thread_num + ) noexcept nogil + + cdef void _parallel_on_Y_init( + self, + ) noexcept nogil + + cdef void _parallel_on_Y_parallel_init( + self, + intp_t thread_num, + intp_t X_start, + intp_t X_end, + ) noexcept nogil + + cdef void _parallel_on_Y_pre_compute_and_reduce_distances_on_chunks( + self, + intp_t X_start, + intp_t X_end, + intp_t Y_start, + intp_t Y_end, + intp_t thread_num, + ) noexcept nogil + + cdef void _parallel_on_Y_synchronize( + self, + intp_t X_start, + intp_t X_end, + ) noexcept nogil + + cdef void _parallel_on_Y_finalize( + self, + ) noexcept nogil diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/_pairwise_distances_reduction/_classmode.pxd b/venv/lib/python3.10/site-packages/sklearn/metrics/_pairwise_distances_reduction/_classmode.pxd new file mode 100644 index 0000000000000000000000000000000000000000..65db044d668e89cc0a681a871663220d065dca41 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/metrics/_pairwise_distances_reduction/_classmode.pxd @@ -0,0 +1,5 @@ +cpdef enum WeightingStrategy: + uniform = 0 + # TODO: Implement the following options in weighted_histogram_mode + distance = 1 + callable = 2 diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/_pairwise_distances_reduction/_datasets_pair.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/sklearn/metrics/_pairwise_distances_reduction/_datasets_pair.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..92219909b634c3992e10e006d1c4f7b8da608caf Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/metrics/_pairwise_distances_reduction/_datasets_pair.cpython-310-x86_64-linux-gnu.so differ diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/_pairwise_distances_reduction/_datasets_pair.pxd b/venv/lib/python3.10/site-packages/sklearn/metrics/_pairwise_distances_reduction/_datasets_pair.pxd new file mode 100644 index 0000000000000000000000000000000000000000..17c9f75dff79aed59ad330cac113e7c3f414038c --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/sklearn/metrics/_pairwise_distances_reduction/_datasets_pair.pxd @@ -0,0 +1,106 @@ +# WARNING: Do not edit this file directly. +# It is automatically generated from 'sklearn/metrics/_pairwise_distances_reduction/_datasets_pair.pxd.tp'. +# Changes must be made there. + +from ...utils._typedefs cimport float64_t, float32_t, int32_t, intp_t +from ...metrics._dist_metrics cimport DistanceMetric64, DistanceMetric32, DistanceMetric + + +cdef class DatasetsPair64: + cdef: + DistanceMetric64 distance_metric + intp_t n_features + + cdef intp_t n_samples_X(self) noexcept nogil + + cdef intp_t n_samples_Y(self) noexcept nogil + + cdef float64_t dist(self, intp_t i, intp_t j) noexcept nogil + + cdef float64_t surrogate_dist(self, intp_t i, intp_t j) noexcept nogil + + +cdef class DenseDenseDatasetsPair64(DatasetsPair64): + cdef: + const float64_t[:, ::1] X + const float64_t[:, ::1] Y + + +cdef class SparseSparseDatasetsPair64(DatasetsPair64): + cdef: + const float64_t[:] X_data + const int32_t[::1] X_indices + const int32_t[::1] X_indptr + + const float64_t[:] Y_data + const int32_t[::1] Y_indices + const int32_t[::1] Y_indptr + + +cdef class SparseDenseDatasetsPair64(DatasetsPair64): + cdef: + const float64_t[:] X_data + const int32_t[::1] X_indices + const int32_t[::1] X_indptr + + const float64_t[:] Y_data + const int32_t[::1] Y_indices + intp_t n_Y + + +cdef class DenseSparseDatasetsPair64(DatasetsPair64): + cdef: + # As distance metrics are commutative, we can simply rely + # on the implementation of SparseDenseDatasetsPair and + # swap arguments. + DatasetsPair64 datasets_pair + + +cdef class DatasetsPair32: + cdef: + DistanceMetric32 distance_metric + intp_t n_features + + cdef intp_t n_samples_X(self) noexcept nogil + + cdef intp_t n_samples_Y(self) noexcept nogil + + cdef float64_t dist(self, intp_t i, intp_t j) noexcept nogil + + cdef float64_t surrogate_dist(self, intp_t i, intp_t j) noexcept nogil + + +cdef class DenseDenseDatasetsPair32(DatasetsPair32): + cdef: + const float32_t[:, ::1] X + const float32_t[:, ::1] Y + + +cdef class SparseSparseDatasetsPair32(DatasetsPair32): + cdef: + const float32_t[:] X_data + const int32_t[::1] X_indices + const int32_t[::1] X_indptr + + const float32_t[:] Y_data + const int32_t[::1] Y_indices + const int32_t[::1] Y_indptr + + +cdef class SparseDenseDatasetsPair32(DatasetsPair32): + cdef: + const float32_t[:] X_data + const int32_t[::1] X_indices + const int32_t[::1] X_indptr + + const float32_t[:] Y_data + const int32_t[::1] Y_indices + intp_t n_Y + + +cdef class DenseSparseDatasetsPair32(DatasetsPair32): + cdef: + # As distance metrics are commutative, we can simply rely + # on the implementation of SparseDenseDatasetsPair and + # swap arguments. + DatasetsPair32 datasets_pair diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/_pairwise_distances_reduction/_dispatcher.py b/venv/lib/python3.10/site-packages/sklearn/metrics/_pairwise_distances_reduction/_dispatcher.py new file mode 100644 index 0000000000000000000000000000000000000000..956de3577bcee224be751f73297d35242725d63e --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/metrics/_pairwise_distances_reduction/_dispatcher.py @@ -0,0 +1,764 @@ +from abc import abstractmethod +from typing import List + +import numpy as np +from scipy.sparse import issparse + +from ... 
import get_config +from .._dist_metrics import ( + BOOL_METRICS, + METRIC_MAPPING64, + DistanceMetric, +) +from ._argkmin import ( + ArgKmin32, + ArgKmin64, +) +from ._argkmin_classmode import ( + ArgKminClassMode32, + ArgKminClassMode64, +) +from ._base import _sqeuclidean_row_norms32, _sqeuclidean_row_norms64 +from ._radius_neighbors import ( + RadiusNeighbors32, + RadiusNeighbors64, +) +from ._radius_neighbors_classmode import ( + RadiusNeighborsClassMode32, + RadiusNeighborsClassMode64, +) + + +def sqeuclidean_row_norms(X, num_threads): + """Compute the squared euclidean norm of the rows of X in parallel. + + Parameters + ---------- + X : ndarray or CSR matrix of shape (n_samples, n_features) + Input data. Must be c-contiguous. + + num_threads : int + The number of OpenMP threads to use. + + Returns + ------- + sqeuclidean_row_norms : ndarray of shape (n_samples,) + Arrays containing the squared euclidean norm of each row of X. + """ + if X.dtype == np.float64: + return np.asarray(_sqeuclidean_row_norms64(X, num_threads)) + if X.dtype == np.float32: + return np.asarray(_sqeuclidean_row_norms32(X, num_threads)) + + raise ValueError( + "Only float64 or float32 datasets are supported at this time, " + f"got: X.dtype={X.dtype}." + ) + + +class BaseDistancesReductionDispatcher: + """Abstract base dispatcher for pairwise distance computation & reduction. + + Each dispatcher extending the base :class:`BaseDistancesReductionDispatcher` + dispatcher must implement the :meth:`compute` classmethod. + """ + + @classmethod + def valid_metrics(cls) -> List[str]: + excluded = { + # PyFunc cannot be supported because it necessitates interacting with + # the CPython interpreter to call user defined functions. + "pyfunc", + "mahalanobis", # is numerically unstable + # In order to support discrete distance metrics, we need to have a + # stable simultaneous sort which preserves the order of the indices + # because there generally is a lot of occurrences for a given values + # of distances in this case. + # TODO: implement a stable simultaneous_sort. + "hamming", + *BOOL_METRICS, + } + return sorted(({"sqeuclidean"} | set(METRIC_MAPPING64.keys())) - excluded) + + @classmethod + def is_usable_for(cls, X, Y, metric) -> bool: + """Return True if the dispatcher can be used for the + given parameters. + + Parameters + ---------- + X : {ndarray, sparse matrix} of shape (n_samples_X, n_features) + Input data. + + Y : {ndarray, sparse matrix} of shape (n_samples_Y, n_features) + Input data. + + metric : str, default='euclidean' + The distance metric to use. + For a list of available metrics, see the documentation of + :class:`~sklearn.metrics.DistanceMetric`. + + Returns + ------- + True if the dispatcher can be used, else False. + """ + + # FIXME: the current Cython implementation is too slow for a large number of + # features. We temporarily disable it to fallback on SciPy's implementation. 
+ # See: https://github.com/scikit-learn/scikit-learn/issues/28191 + if ( + issparse(X) + and issparse(Y) + and isinstance(metric, str) + and "euclidean" in metric + ): + return False + + def is_numpy_c_ordered(X): + return hasattr(X, "flags") and getattr(X.flags, "c_contiguous", False) + + def is_valid_sparse_matrix(X): + return ( + issparse(X) + and X.format == "csr" + and + # TODO: support CSR matrices without non-zeros elements + X.nnz > 0 + and + # TODO: support CSR matrices with int64 indices and indptr + # See: https://github.com/scikit-learn/scikit-learn/issues/23653 + X.indices.dtype == X.indptr.dtype == np.int32 + ) + + is_usable = ( + get_config().get("enable_cython_pairwise_dist", True) + and (is_numpy_c_ordered(X) or is_valid_sparse_matrix(X)) + and (is_numpy_c_ordered(Y) or is_valid_sparse_matrix(Y)) + and X.dtype == Y.dtype + and X.dtype in (np.float32, np.float64) + and (metric in cls.valid_metrics() or isinstance(metric, DistanceMetric)) + ) + + return is_usable + + @classmethod + @abstractmethod + def compute( + cls, + X, + Y, + **kwargs, + ): + """Compute the reduction. + + Parameters + ---------- + X : ndarray or CSR matrix of shape (n_samples_X, n_features) + Input data. + + Y : ndarray or CSR matrix of shape (n_samples_Y, n_features) + Input data. + + **kwargs : additional parameters for the reduction + + Notes + ----- + This method is an abstract class method: it has to be implemented + for all subclasses. + """ + + +class ArgKmin(BaseDistancesReductionDispatcher): + """Compute the argkmin of row vectors of X on the ones of Y. + + For each row vector of X, computes the indices of k first the rows + vectors of Y with the smallest distances. + + ArgKmin is typically used to perform + bruteforce k-nearest neighbors queries. + + This class is not meant to be instantiated, one should only use + its :meth:`compute` classmethod which handles allocation and + deallocation consistently. + """ + + @classmethod + def compute( + cls, + X, + Y, + k, + metric="euclidean", + chunk_size=None, + metric_kwargs=None, + strategy=None, + return_distance=False, + ): + """Compute the argkmin reduction. + + Parameters + ---------- + X : ndarray or CSR matrix of shape (n_samples_X, n_features) + Input data. + + Y : ndarray or CSR matrix of shape (n_samples_Y, n_features) + Input data. + + k : int + The k for the argkmin reduction. + + metric : str, default='euclidean' + The distance metric to use for argkmin. + For a list of available metrics, see the documentation of + :class:`~sklearn.metrics.DistanceMetric`. + + chunk_size : int, default=None, + The number of vectors per chunk. If None (default) looks-up in + scikit-learn configuration for `pairwise_dist_chunk_size`, + and use 256 if it is not set. + + metric_kwargs : dict, default=None + Keyword arguments to pass to specified metric function. + + strategy : str, {'auto', 'parallel_on_X', 'parallel_on_Y'}, default=None + The chunking strategy defining which dataset parallelization are made on. + + For both strategies the computations happens with two nested loops, + respectively on chunks of X and chunks of Y. + Strategies differs on which loop (outer or inner) is made to run + in parallel with the Cython `prange` construct: + + - 'parallel_on_X' dispatches chunks of X uniformly on threads. + Each thread then iterates on all the chunks of Y. This strategy is + embarrassingly parallel and comes with no datastructures + synchronisation. + + - 'parallel_on_Y' dispatches chunks of Y uniformly on threads. 
+ Each thread processes all the chunks of X in turn. This strategy is + a sequence of embarrassingly parallel subtasks (the inner loop on Y + chunks) with intermediate datastructures synchronisation at each + iteration of the sequential outer loop on X chunks. + + - 'auto' relies on a simple heuristic to choose between + 'parallel_on_X' and 'parallel_on_Y': when `X.shape[0]` is large enough, + 'parallel_on_X' is usually the most efficient strategy. + When `X.shape[0]` is small but `Y.shape[0]` is large, 'parallel_on_Y' + brings more opportunity for parallelism and is therefore more efficient + + - None (default) looks-up in scikit-learn configuration for + `pairwise_dist_parallel_strategy`, and use 'auto' if it is not set. + + return_distance : boolean, default=False + Return distances between each X vector and its + argkmin if set to True. + + Returns + ------- + If return_distance=False: + - argkmin_indices : ndarray of shape (n_samples_X, k) + Indices of the argkmin for each vector in X. + + If return_distance=True: + - argkmin_distances : ndarray of shape (n_samples_X, k) + Distances to the argkmin for each vector in X. + - argkmin_indices : ndarray of shape (n_samples_X, k) + Indices of the argkmin for each vector in X. + + Notes + ----- + This classmethod inspects the arguments values to dispatch to the + dtype-specialized implementation of :class:`ArgKmin`. + + This allows decoupling the API entirely from the implementation details + whilst maintaining RAII: all temporarily allocated datastructures necessary + for the concrete implementation are therefore freed when this classmethod + returns. + """ + if X.dtype == Y.dtype == np.float64: + return ArgKmin64.compute( + X=X, + Y=Y, + k=k, + metric=metric, + chunk_size=chunk_size, + metric_kwargs=metric_kwargs, + strategy=strategy, + return_distance=return_distance, + ) + + if X.dtype == Y.dtype == np.float32: + return ArgKmin32.compute( + X=X, + Y=Y, + k=k, + metric=metric, + chunk_size=chunk_size, + metric_kwargs=metric_kwargs, + strategy=strategy, + return_distance=return_distance, + ) + + raise ValueError( + "Only float64 or float32 datasets pairs are supported at this time, " + f"got: X.dtype={X.dtype} and Y.dtype={Y.dtype}." + ) + + +class RadiusNeighbors(BaseDistancesReductionDispatcher): + """Compute radius-based neighbors for two sets of vectors. + + For each row-vector X[i] of the queries X, find all the indices j of + row-vectors in Y such that: + + dist(X[i], Y[j]) <= radius + + The distance function `dist` depends on the values of the `metric` + and `metric_kwargs` parameters. + + This class is not meant to be instantiated, one should only use + its :meth:`compute` classmethod which handles allocation and + deallocation consistently. + """ + + @classmethod + def compute( + cls, + X, + Y, + radius, + metric="euclidean", + chunk_size=None, + metric_kwargs=None, + strategy=None, + return_distance=False, + sort_results=False, + ): + """Return the results of the reduction for the given arguments. + + Parameters + ---------- + X : ndarray or CSR matrix of shape (n_samples_X, n_features) + Input data. + + Y : ndarray or CSR matrix of shape (n_samples_Y, n_features) + Input data. + + radius : float + The radius defining the neighborhood. + + metric : str, default='euclidean' + The distance metric to use. + For a list of available metrics, see the documentation of + :class:`~sklearn.metrics.DistanceMetric`. + + chunk_size : int, default=None, + The number of vectors per chunk. 
If None (default) looks-up in + scikit-learn configuration for `pairwise_dist_chunk_size`, + and use 256 if it is not set. + + metric_kwargs : dict, default=None + Keyword arguments to pass to specified metric function. + + strategy : str, {'auto', 'parallel_on_X', 'parallel_on_Y'}, default=None + The chunking strategy defining which dataset parallelization are made on. + + For both strategies the computations happens with two nested loops, + respectively on chunks of X and chunks of Y. + Strategies differs on which loop (outer or inner) is made to run + in parallel with the Cython `prange` construct: + + - 'parallel_on_X' dispatches chunks of X uniformly on threads. + Each thread then iterates on all the chunks of Y. This strategy is + embarrassingly parallel and comes with no datastructures + synchronisation. + + - 'parallel_on_Y' dispatches chunks of Y uniformly on threads. + Each thread processes all the chunks of X in turn. This strategy is + a sequence of embarrassingly parallel subtasks (the inner loop on Y + chunks) with intermediate datastructures synchronisation at each + iteration of the sequential outer loop on X chunks. + + - 'auto' relies on a simple heuristic to choose between + 'parallel_on_X' and 'parallel_on_Y': when `X.shape[0]` is large enough, + 'parallel_on_X' is usually the most efficient strategy. + When `X.shape[0]` is small but `Y.shape[0]` is large, 'parallel_on_Y' + brings more opportunity for parallelism and is therefore more efficient + despite the synchronization step at each iteration of the outer loop + on chunks of `X`. + + - None (default) looks-up in scikit-learn configuration for + `pairwise_dist_parallel_strategy`, and use 'auto' if it is not set. + + return_distance : boolean, default=False + Return distances between each X vector and its neighbors if set to True. + + sort_results : boolean, default=False + Sort results with respect to distances between each X vector and its + neighbors if set to True. + + Returns + ------- + If return_distance=False: + - neighbors_indices : ndarray of n_samples_X ndarray + Indices of the neighbors for each vector in X. + + If return_distance=True: + - neighbors_indices : ndarray of n_samples_X ndarray + Indices of the neighbors for each vector in X. + - neighbors_distances : ndarray of n_samples_X ndarray + Distances to the neighbors for each vector in X. + + Notes + ----- + This classmethod inspects the arguments values to dispatch to the + dtype-specialized implementation of :class:`RadiusNeighbors`. + + This allows decoupling the API entirely from the implementation details + whilst maintaining RAII: all temporarily allocated datastructures necessary + for the concrete implementation are therefore freed when this classmethod + returns. + """ + if X.dtype == Y.dtype == np.float64: + return RadiusNeighbors64.compute( + X=X, + Y=Y, + radius=radius, + metric=metric, + chunk_size=chunk_size, + metric_kwargs=metric_kwargs, + strategy=strategy, + sort_results=sort_results, + return_distance=return_distance, + ) + + if X.dtype == Y.dtype == np.float32: + return RadiusNeighbors32.compute( + X=X, + Y=Y, + radius=radius, + metric=metric, + chunk_size=chunk_size, + metric_kwargs=metric_kwargs, + strategy=strategy, + sort_results=sort_results, + return_distance=return_distance, + ) + + raise ValueError( + "Only float64 or float32 datasets pairs are supported at this time, " + f"got: X.dtype={X.dtype} and Y.dtype={Y.dtype}." 
+ ) + + +class ArgKminClassMode(BaseDistancesReductionDispatcher): + """Compute the argkmin of row vectors of X on the ones of Y with labels. + + For each row vector of X, computes the indices of k first the rows + vectors of Y with the smallest distances. Computes weighted mode of labels. + + ArgKminClassMode is typically used to perform bruteforce k-nearest neighbors + queries when the weighted mode of the labels for the k-nearest neighbors + are required, such as in `predict` methods. + + This class is not meant to be instantiated, one should only use + its :meth:`compute` classmethod which handles allocation and + deallocation consistently. + """ + + @classmethod + def valid_metrics(cls) -> List[str]: + excluded = { + # Euclidean is technically usable for ArgKminClassMode + # but its current implementation would not be competitive. + # TODO: implement Euclidean specialization using GEMM. + "euclidean", + "sqeuclidean", + } + return list(set(BaseDistancesReductionDispatcher.valid_metrics()) - excluded) + + @classmethod + def compute( + cls, + X, + Y, + k, + weights, + Y_labels, + unique_Y_labels, + metric="euclidean", + chunk_size=None, + metric_kwargs=None, + strategy=None, + ): + """Compute the argkmin reduction. + + Parameters + ---------- + X : ndarray of shape (n_samples_X, n_features) + The input array to be labelled. + + Y : ndarray of shape (n_samples_Y, n_features) + The input array whose class membership are provided through the + `Y_labels` parameter. + + k : int + The number of nearest neighbors to consider. + + weights : ndarray + The weights applied over the `Y_labels` of `Y` when computing the + weighted mode of the labels. + + Y_labels : ndarray + An array containing the index of the class membership of the + associated samples in `Y`. This is used in labeling `X`. + + unique_Y_labels : ndarray + An array containing all unique indices contained in the + corresponding `Y_labels` array. + + metric : str, default='euclidean' + The distance metric to use. For a list of available metrics, see + the documentation of :class:`~sklearn.metrics.DistanceMetric`. + Currently does not support `'precomputed'`. + + chunk_size : int, default=None, + The number of vectors per chunk. If None (default) looks-up in + scikit-learn configuration for `pairwise_dist_chunk_size`, + and use 256 if it is not set. + + metric_kwargs : dict, default=None + Keyword arguments to pass to specified metric function. + + strategy : str, {'auto', 'parallel_on_X', 'parallel_on_Y'}, default=None + The chunking strategy defining which dataset parallelization are made on. + + For both strategies the computations happens with two nested loops, + respectively on chunks of X and chunks of Y. + Strategies differs on which loop (outer or inner) is made to run + in parallel with the Cython `prange` construct: + + - 'parallel_on_X' dispatches chunks of X uniformly on threads. + Each thread then iterates on all the chunks of Y. This strategy is + embarrassingly parallel and comes with no datastructures + synchronisation. + + - 'parallel_on_Y' dispatches chunks of Y uniformly on threads. + Each thread processes all the chunks of X in turn. This strategy is + a sequence of embarrassingly parallel subtasks (the inner loop on Y + chunks) with intermediate datastructures synchronisation at each + iteration of the sequential outer loop on X chunks. 
+ + - 'auto' relies on a simple heuristic to choose between + 'parallel_on_X' and 'parallel_on_Y': when `X.shape[0]` is large enough, + 'parallel_on_X' is usually the most efficient strategy. + When `X.shape[0]` is small but `Y.shape[0]` is large, 'parallel_on_Y' + brings more opportunity for parallelism and is therefore more efficient + despite the synchronization step at each iteration of the outer loop + on chunks of `X`. + + - None (default) looks-up in scikit-learn configuration for + `pairwise_dist_parallel_strategy`, and use 'auto' if it is not set. + + Returns + ------- + probabilities : ndarray of shape (n_samples_X, n_classes) + An array containing the class probabilities for each sample. + + Notes + ----- + This classmethod is responsible for introspecting the arguments + values to dispatch to the most appropriate implementation of + :class:`PairwiseDistancesArgKmin`. + + This allows decoupling the API entirely from the implementation details + whilst maintaining RAII: all temporarily allocated datastructures necessary + for the concrete implementation are therefore freed when this classmethod + returns. + """ + if weights not in {"uniform", "distance"}: + raise ValueError( + "Only the 'uniform' or 'distance' weights options are supported" + f" at this time. Got: {weights=}." + ) + if X.dtype == Y.dtype == np.float64: + return ArgKminClassMode64.compute( + X=X, + Y=Y, + k=k, + weights=weights, + Y_labels=np.array(Y_labels, dtype=np.intp), + unique_Y_labels=np.array(unique_Y_labels, dtype=np.intp), + metric=metric, + chunk_size=chunk_size, + metric_kwargs=metric_kwargs, + strategy=strategy, + ) + + if X.dtype == Y.dtype == np.float32: + return ArgKminClassMode32.compute( + X=X, + Y=Y, + k=k, + weights=weights, + Y_labels=np.array(Y_labels, dtype=np.intp), + unique_Y_labels=np.array(unique_Y_labels, dtype=np.intp), + metric=metric, + chunk_size=chunk_size, + metric_kwargs=metric_kwargs, + strategy=strategy, + ) + + raise ValueError( + "Only float64 or float32 datasets pairs are supported at this time, " + f"got: X.dtype={X.dtype} and Y.dtype={Y.dtype}." + ) + + +class RadiusNeighborsClassMode(BaseDistancesReductionDispatcher): + """Compute radius-based class modes of row vectors of X using the + those of Y. + + For each row-vector X[i] of the queries X, find all the indices j of + row-vectors in Y such that: + + dist(X[i], Y[j]) <= radius + + RadiusNeighborsClassMode is typically used to perform bruteforce + radius neighbors queries when the weighted mode of the labels for + the nearest neighbors within the specified radius are required, + such as in `predict` methods. + + This class is not meant to be instantiated, one should only use + its :meth:`compute` classmethod which handles allocation and + deallocation consistently. + """ + + @classmethod + def valid_metrics(cls) -> List[str]: + excluded = { + # Euclidean is technically usable for RadiusNeighborsClassMode + # but it would not be competitive. + # TODO: implement Euclidean specialization using GEMM. + "euclidean", + "sqeuclidean", + } + return sorted(set(BaseDistancesReductionDispatcher.valid_metrics()) - excluded) + + @classmethod + def compute( + cls, + X, + Y, + radius, + weights, + Y_labels, + unique_Y_labels, + outlier_label, + metric="euclidean", + chunk_size=None, + metric_kwargs=None, + strategy=None, + ): + """Return the results of the reduction for the given arguments. + Parameters + ---------- + X : ndarray of shape (n_samples_X, n_features) + The input array to be labelled. 
+ Y : ndarray of shape (n_samples_Y, n_features) + The input array whose class membership is provided through + the `Y_labels` parameter. + radius : float + The radius defining the neighborhood. + weights : ndarray + The weights applied to the `Y_labels` when computing the + weighted mode of the labels. + Y_labels : ndarray + An array containing the index of the class membership of the + associated samples in `Y`. This is used in labeling `X`. + unique_Y_labels : ndarray + An array containing all unique class labels. + outlier_label : int, default=None + Label for outlier samples (samples with no neighbors in given + radius). In the default case when the value is None if any + outlier is detected, a ValueError will be raised. The outlier + label should be selected from among the unique 'Y' labels. If + it is specified with a different value a warning will be raised + and all class probabilities of outliers will be assigned to be 0. + metric : str, default='euclidean' + The distance metric to use. For a list of available metrics, see + the documentation of :class:`~sklearn.metrics.DistanceMetric`. + Currently does not support `'precomputed'`. + chunk_size : int, default=None, + The number of vectors per chunk. If None (default) looks-up in + scikit-learn configuration for `pairwise_dist_chunk_size`, + and use 256 if it is not set. + metric_kwargs : dict, default=None + Keyword arguments to pass to specified metric function. + strategy : str, {'auto', 'parallel_on_X', 'parallel_on_Y'}, default=None + The chunking strategy defining which dataset parallelization are made on. + For both strategies the computations happens with two nested loops, + respectively on chunks of X and chunks of Y. + Strategies differs on which loop (outer or inner) is made to run + in parallel with the Cython `prange` construct: + - 'parallel_on_X' dispatches chunks of X uniformly on threads. + Each thread then iterates on all the chunks of Y. This strategy is + embarrassingly parallel and comes with no datastructures + synchronisation. + - 'parallel_on_Y' dispatches chunks of Y uniformly on threads. + Each thread processes all the chunks of X in turn. This strategy is + a sequence of embarrassingly parallel subtasks (the inner loop on Y + chunks) with intermediate datastructures synchronisation at each + iteration of the sequential outer loop on X chunks. + - 'auto' relies on a simple heuristic to choose between + 'parallel_on_X' and 'parallel_on_Y': when `X.shape[0]` is large enough, + 'parallel_on_X' is usually the most efficient strategy. + When `X.shape[0]` is small but `Y.shape[0]` is large, 'parallel_on_Y' + brings more opportunity for parallelism and is therefore more efficient + despite the synchronization step at each iteration of the outer loop + on chunks of `X`. + - None (default) looks-up in scikit-learn configuration for + `pairwise_dist_parallel_strategy`, and use 'auto' if it is not set. + Returns + ------- + probabilities : ndarray of shape (n_samples_X, n_classes) + An array containing the class probabilities for each sample. + """ + if weights not in {"uniform", "distance"}: + raise ValueError( + "Only the 'uniform' or 'distance' weights options are supported" + f" at this time. Got: {weights=}." 
+ ) + if X.dtype == Y.dtype == np.float64: + return RadiusNeighborsClassMode64.compute( + X=X, + Y=Y, + radius=radius, + weights=weights, + Y_labels=np.array(Y_labels, dtype=np.intp), + unique_Y_labels=np.array(unique_Y_labels, dtype=np.intp), + outlier_label=outlier_label, + metric=metric, + chunk_size=chunk_size, + metric_kwargs=metric_kwargs, + strategy=strategy, + ) + + if X.dtype == Y.dtype == np.float32: + return RadiusNeighborsClassMode32.compute( + X=X, + Y=Y, + radius=radius, + weights=weights, + Y_labels=np.array(Y_labels, dtype=np.intp), + unique_Y_labels=np.array(unique_Y_labels, dtype=np.intp), + outlier_label=outlier_label, + metric=metric, + chunk_size=chunk_size, + metric_kwargs=metric_kwargs, + strategy=strategy, + ) + + raise ValueError( + "Only float64 or float32 datasets pairs are supported at this time, " + f"got: X.dtype={X.dtype} and Y.dtype={Y.dtype}." + ) diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/_pairwise_distances_reduction/_middle_term_computer.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/sklearn/metrics/_pairwise_distances_reduction/_middle_term_computer.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..266a4774f643cab495ea0ea7d31b97953891de50 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/metrics/_pairwise_distances_reduction/_middle_term_computer.cpython-310-x86_64-linux-gnu.so differ diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/_pairwise_distances_reduction/_middle_term_computer.pxd b/venv/lib/python3.10/site-packages/sklearn/metrics/_pairwise_distances_reduction/_middle_term_computer.pxd new file mode 100644 index 0000000000000000000000000000000000000000..72090adabcdd9347b8d091cbbf701959f3ea3ed3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/metrics/_pairwise_distances_reduction/_middle_term_computer.pxd @@ -0,0 +1,396 @@ +# WARNING: Do not edit this file directly. +# It is automatically generated from 'sklearn/metrics/_pairwise_distances_reduction/_middle_term_computer.pxd.tp'. +# Changes must be made there. 
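#
# Editorial sketch, not part of the generated file: the "middle term" kept in
# the per-chunk buffers declared below is the `-2 * X_c @ Y_c.T` piece of the
# squared Euclidean decomposition. A plain NumPy check of that decomposition
# for one pair of chunks (chunk bounds chosen arbitrarily for illustration):
#
#   import numpy as np
#   from scipy.spatial.distance import cdist
#
#   rng = np.random.default_rng(0)
#   X, Y = rng.standard_normal((50, 8)), rng.standard_normal((80, 8))
#   x_sq = (X ** 2).sum(axis=1)          # ||x_i||^2, precomputed once
#   y_sq = (Y ** 2).sum(axis=1)          # ||y_j||^2, precomputed once
#
#   X_start, X_end, Y_start, Y_end = 0, 20, 40, 80
#   X_c, Y_c = X[X_start:X_end], Y[Y_start:Y_end]
#
#   middle = -2.0 * X_c @ Y_c.T          # GEMM, one buffer per chunk pair
#   sq_chunk = x_sq[X_start:X_end, None] + middle + y_sq[None, Y_start:Y_end]
#
#   assert np.allclose(sq_chunk, cdist(X_c, Y_c, "sqeuclidean"))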
+ +from libcpp.vector cimport vector + +from ...utils._typedefs cimport float64_t, float32_t, int32_t, intp_t + + +cdef void _middle_term_sparse_sparse_64( + const float64_t[:] X_data, + const int32_t[:] X_indices, + const int32_t[:] X_indptr, + intp_t X_start, + intp_t X_end, + const float64_t[:] Y_data, + const int32_t[:] Y_indices, + const int32_t[:] Y_indptr, + intp_t Y_start, + intp_t Y_end, + float64_t * D, +) noexcept nogil + + + +cdef class MiddleTermComputer64: + cdef: + intp_t effective_n_threads + intp_t chunks_n_threads + intp_t dist_middle_terms_chunks_size + intp_t n_features + intp_t chunk_size + + # Buffers for the `-2 * X_c @ Y_c.T` term computed via GEMM + vector[vector[float64_t]] dist_middle_terms_chunks + + cdef void _parallel_on_X_pre_compute_and_reduce_distances_on_chunks( + self, + intp_t X_start, + intp_t X_end, + intp_t Y_start, + intp_t Y_end, + intp_t thread_num, + ) noexcept nogil + + cdef void _parallel_on_X_parallel_init(self, intp_t thread_num) noexcept nogil + + cdef void _parallel_on_X_init_chunk( + self, + intp_t thread_num, + intp_t X_start, + intp_t X_end, + ) noexcept nogil + + cdef void _parallel_on_Y_init(self) noexcept nogil + + cdef void _parallel_on_Y_parallel_init( + self, + intp_t thread_num, + intp_t X_start, + intp_t X_end, + ) noexcept nogil + + cdef void _parallel_on_Y_pre_compute_and_reduce_distances_on_chunks( + self, + intp_t X_start, + intp_t X_end, + intp_t Y_start, + intp_t Y_end, + intp_t thread_num + ) noexcept nogil + + cdef float64_t * _compute_dist_middle_terms( + self, + intp_t X_start, + intp_t X_end, + intp_t Y_start, + intp_t Y_end, + intp_t thread_num, + ) noexcept nogil + + +cdef class DenseDenseMiddleTermComputer64(MiddleTermComputer64): + cdef: + const float64_t[:, ::1] X + const float64_t[:, ::1] Y + + + cdef void _parallel_on_X_pre_compute_and_reduce_distances_on_chunks( + self, + intp_t X_start, + intp_t X_end, + intp_t Y_start, + intp_t Y_end, + intp_t thread_num, + ) noexcept nogil + + cdef void _parallel_on_X_init_chunk( + self, + intp_t thread_num, + intp_t X_start, + intp_t X_end, + ) noexcept nogil + + cdef void _parallel_on_Y_parallel_init( + self, + intp_t thread_num, + intp_t X_start, + intp_t X_end, + ) noexcept nogil + + cdef void _parallel_on_Y_pre_compute_and_reduce_distances_on_chunks( + self, + intp_t X_start, + intp_t X_end, + intp_t Y_start, + intp_t Y_end, + intp_t thread_num + ) noexcept nogil + + cdef float64_t * _compute_dist_middle_terms( + self, + intp_t X_start, + intp_t X_end, + intp_t Y_start, + intp_t Y_end, + intp_t thread_num, + ) noexcept nogil + + +cdef class SparseSparseMiddleTermComputer64(MiddleTermComputer64): + cdef: + const float64_t[:] X_data + const int32_t[:] X_indices + const int32_t[:] X_indptr + + const float64_t[:] Y_data + const int32_t[:] Y_indices + const int32_t[:] Y_indptr + + cdef void _parallel_on_X_pre_compute_and_reduce_distances_on_chunks( + self, + intp_t X_start, + intp_t X_end, + intp_t Y_start, + intp_t Y_end, + intp_t thread_num + ) noexcept nogil + + cdef void _parallel_on_Y_pre_compute_and_reduce_distances_on_chunks( + self, + intp_t X_start, + intp_t X_end, + intp_t Y_start, + intp_t Y_end, + intp_t thread_num + ) noexcept nogil + + cdef float64_t * _compute_dist_middle_terms( + self, + intp_t X_start, + intp_t X_end, + intp_t Y_start, + intp_t Y_end, + intp_t thread_num, + ) noexcept nogil + + +cdef class SparseDenseMiddleTermComputer64(MiddleTermComputer64): + cdef: + const float64_t[:] X_data + const int32_t[:] X_indices + const int32_t[:] X_indptr + + 
const float64_t[:, ::1] Y + + # We treat the dense-sparse case with the sparse-dense case by simply + # treating the dist_middle_terms as F-ordered and by swapping arguments. + # This attribute is meant to encode the case and adapt the logic + # accordingly. + bint c_ordered_middle_term + + cdef void _parallel_on_X_pre_compute_and_reduce_distances_on_chunks( + self, + intp_t X_start, + intp_t X_end, + intp_t Y_start, + intp_t Y_end, + intp_t thread_num + ) noexcept nogil + + cdef void _parallel_on_Y_pre_compute_and_reduce_distances_on_chunks( + self, + intp_t X_start, + intp_t X_end, + intp_t Y_start, + intp_t Y_end, + intp_t thread_num + ) noexcept nogil + + cdef float64_t * _compute_dist_middle_terms( + self, + intp_t X_start, + intp_t X_end, + intp_t Y_start, + intp_t Y_end, + intp_t thread_num, + ) noexcept nogil + + +cdef class MiddleTermComputer32: + cdef: + intp_t effective_n_threads + intp_t chunks_n_threads + intp_t dist_middle_terms_chunks_size + intp_t n_features + intp_t chunk_size + + # Buffers for the `-2 * X_c @ Y_c.T` term computed via GEMM + vector[vector[float64_t]] dist_middle_terms_chunks + + cdef void _parallel_on_X_pre_compute_and_reduce_distances_on_chunks( + self, + intp_t X_start, + intp_t X_end, + intp_t Y_start, + intp_t Y_end, + intp_t thread_num, + ) noexcept nogil + + cdef void _parallel_on_X_parallel_init(self, intp_t thread_num) noexcept nogil + + cdef void _parallel_on_X_init_chunk( + self, + intp_t thread_num, + intp_t X_start, + intp_t X_end, + ) noexcept nogil + + cdef void _parallel_on_Y_init(self) noexcept nogil + + cdef void _parallel_on_Y_parallel_init( + self, + intp_t thread_num, + intp_t X_start, + intp_t X_end, + ) noexcept nogil + + cdef void _parallel_on_Y_pre_compute_and_reduce_distances_on_chunks( + self, + intp_t X_start, + intp_t X_end, + intp_t Y_start, + intp_t Y_end, + intp_t thread_num + ) noexcept nogil + + cdef float64_t * _compute_dist_middle_terms( + self, + intp_t X_start, + intp_t X_end, + intp_t Y_start, + intp_t Y_end, + intp_t thread_num, + ) noexcept nogil + + +cdef class DenseDenseMiddleTermComputer32(MiddleTermComputer32): + cdef: + const float32_t[:, ::1] X + const float32_t[:, ::1] Y + + # Buffers for upcasting chunks of X and Y from 32bit to 64bit + vector[vector[float64_t]] X_c_upcast + vector[vector[float64_t]] Y_c_upcast + + cdef void _parallel_on_X_pre_compute_and_reduce_distances_on_chunks( + self, + intp_t X_start, + intp_t X_end, + intp_t Y_start, + intp_t Y_end, + intp_t thread_num, + ) noexcept nogil + + cdef void _parallel_on_X_init_chunk( + self, + intp_t thread_num, + intp_t X_start, + intp_t X_end, + ) noexcept nogil + + cdef void _parallel_on_Y_parallel_init( + self, + intp_t thread_num, + intp_t X_start, + intp_t X_end, + ) noexcept nogil + + cdef void _parallel_on_Y_pre_compute_and_reduce_distances_on_chunks( + self, + intp_t X_start, + intp_t X_end, + intp_t Y_start, + intp_t Y_end, + intp_t thread_num + ) noexcept nogil + + cdef float64_t * _compute_dist_middle_terms( + self, + intp_t X_start, + intp_t X_end, + intp_t Y_start, + intp_t Y_end, + intp_t thread_num, + ) noexcept nogil + + +cdef class SparseSparseMiddleTermComputer32(MiddleTermComputer32): + cdef: + const float64_t[:] X_data + const int32_t[:] X_indices + const int32_t[:] X_indptr + + const float64_t[:] Y_data + const int32_t[:] Y_indices + const int32_t[:] Y_indptr + + cdef void _parallel_on_X_pre_compute_and_reduce_distances_on_chunks( + self, + intp_t X_start, + intp_t X_end, + intp_t Y_start, + intp_t Y_end, + intp_t thread_num + ) 
noexcept nogil + + cdef void _parallel_on_Y_pre_compute_and_reduce_distances_on_chunks( + self, + intp_t X_start, + intp_t X_end, + intp_t Y_start, + intp_t Y_end, + intp_t thread_num + ) noexcept nogil + + cdef float64_t * _compute_dist_middle_terms( + self, + intp_t X_start, + intp_t X_end, + intp_t Y_start, + intp_t Y_end, + intp_t thread_num, + ) noexcept nogil + + +cdef class SparseDenseMiddleTermComputer32(MiddleTermComputer32): + cdef: + const float64_t[:] X_data + const int32_t[:] X_indices + const int32_t[:] X_indptr + + const float32_t[:, ::1] Y + + # We treat the dense-sparse case with the sparse-dense case by simply + # treating the dist_middle_terms as F-ordered and by swapping arguments. + # This attribute is meant to encode the case and adapt the logic + # accordingly. + bint c_ordered_middle_term + + cdef void _parallel_on_X_pre_compute_and_reduce_distances_on_chunks( + self, + intp_t X_start, + intp_t X_end, + intp_t Y_start, + intp_t Y_end, + intp_t thread_num + ) noexcept nogil + + cdef void _parallel_on_Y_pre_compute_and_reduce_distances_on_chunks( + self, + intp_t X_start, + intp_t X_end, + intp_t Y_start, + intp_t Y_end, + intp_t thread_num + ) noexcept nogil + + cdef float64_t * _compute_dist_middle_terms( + self, + intp_t X_start, + intp_t X_end, + intp_t Y_start, + intp_t Y_end, + intp_t thread_num, + ) noexcept nogil diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/_pairwise_distances_reduction/_radius_neighbors.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/sklearn/metrics/_pairwise_distances_reduction/_radius_neighbors.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..087980ff2b60167c4fe3faa2bad897115ce63763 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/metrics/_pairwise_distances_reduction/_radius_neighbors.cpython-310-x86_64-linux-gnu.so differ diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/_pairwise_distances_reduction/_radius_neighbors.pxd b/venv/lib/python3.10/site-packages/sklearn/metrics/_pairwise_distances_reduction/_radius_neighbors.pxd new file mode 100644 index 0000000000000000000000000000000000000000..230997a61d25946410ea7c3599fb74fdb246374c --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/metrics/_pairwise_distances_reduction/_radius_neighbors.pxd @@ -0,0 +1,150 @@ +# WARNING: Do not edit this file directly. +# It is automatically generated from 'sklearn/metrics/_pairwise_distances_reduction/_radius_neighbors.pxd.tp'. +# Changes must be made there. + +cimport numpy as cnp + +from libcpp.memory cimport shared_ptr +from libcpp.vector cimport vector +from cython cimport final + +from ...utils._typedefs cimport intp_t, float64_t + +cnp.import_array() + +###################### +## std::vector to np.ndarray coercion +# As type covariance is not supported for C++ containers via Cython, +# we need to redefine fused types. 
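#
# Editorial usage sketch, not part of the generated file: the vector-of-vector
# coercion declared below is what ultimately lets the public `RadiusNeighbors`
# dispatcher return ragged results as a NumPy object array, one variable-length
# index array per query row (radius and shapes below are arbitrary):
#
#   import numpy as np
#   from sklearn.metrics._pairwise_distances_reduction import RadiusNeighbors
#
#   rng = np.random.default_rng(0)
#   X = rng.standard_normal((100, 8))
#   Y = rng.standard_normal((500, 8))
#
#   neigh_indices = RadiusNeighbors.compute(X, Y, radius=1.5)
#   # `neigh_indices` has dtype=object and length 100; each entry holds the
#   # indices of the rows of Y lying within radius 1.5 of the matching row
#   # of X, so the entries generally have different lengths.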
+ctypedef fused vector_double_intp_t: + vector[intp_t] + vector[float64_t] + + +ctypedef fused vector_vector_double_intp_t: + vector[vector[intp_t]] + vector[vector[float64_t]] + +cdef cnp.ndarray[object, ndim=1] coerce_vectors_to_nd_arrays( + shared_ptr[vector_vector_double_intp_t] vecs +) + +##################### + +from ._base cimport BaseDistancesReduction64 +from ._middle_term_computer cimport MiddleTermComputer64 + +cdef class RadiusNeighbors64(BaseDistancesReduction64): + """float64 implementation of the RadiusNeighbors.""" + + cdef: + float64_t radius + + # DistanceMetric64 compute rank-preserving surrogate distance via rdist + # which are proxies necessitating less computations. + # We get the equivalent for the radius to be able to compare it against + # vectors' rank-preserving surrogate distances. + float64_t r_radius + + # Neighbors indices and distances are returned as np.ndarrays of np.ndarrays. + # + # For this implementation, we want resizable buffers which we will wrap + # into numpy arrays at the end. std::vector comes as a handy container + # for interacting efficiently with resizable buffers. + # + # Though it is possible to access their buffer address with + # std::vector::data, they can't be stolen: buffers lifetime + # is tied to their std::vector and are deallocated when + # std::vectors are. + # + # To solve this, we dynamically allocate std::vectors and then + # encapsulate them in a StdVectorSentinel responsible for + # freeing them when the associated np.ndarray is freed. + # + # Shared pointers (defined via shared_ptr) are use for safer memory management. + # Unique pointers (defined via unique_ptr) can't be used as datastructures + # are shared across threads for parallel_on_X; see _parallel_on_X_init_chunk. + shared_ptr[vector[vector[intp_t]]] neigh_indices + shared_ptr[vector[vector[float64_t]]] neigh_distances + + # Used as array of pointers to private datastructures used in threads. + vector[shared_ptr[vector[vector[intp_t]]]] neigh_indices_chunks + vector[shared_ptr[vector[vector[float64_t]]]] neigh_distances_chunks + + bint sort_results + + @final + cdef void _merge_vectors( + self, + intp_t idx, + intp_t num_threads, + ) noexcept nogil + + +cdef class EuclideanRadiusNeighbors64(RadiusNeighbors64): + """EuclideanDistance-specialisation of RadiusNeighbors64.""" + cdef: + MiddleTermComputer64 middle_term_computer + const float64_t[::1] X_norm_squared + const float64_t[::1] Y_norm_squared + + bint use_squared_distances + +from ._base cimport BaseDistancesReduction32 +from ._middle_term_computer cimport MiddleTermComputer32 + +cdef class RadiusNeighbors32(BaseDistancesReduction32): + """float32 implementation of the RadiusNeighbors.""" + + cdef: + float64_t radius + + # DistanceMetric32 compute rank-preserving surrogate distance via rdist + # which are proxies necessitating less computations. + # We get the equivalent for the radius to be able to compare it against + # vectors' rank-preserving surrogate distances. + float64_t r_radius + + # Neighbors indices and distances are returned as np.ndarrays of np.ndarrays. + # + # For this implementation, we want resizable buffers which we will wrap + # into numpy arrays at the end. std::vector comes as a handy container + # for interacting efficiently with resizable buffers. + # + # Though it is possible to access their buffer address with + # std::vector::data, they can't be stolen: buffers lifetime + # is tied to their std::vector and are deallocated when + # std::vectors are. 
+ # + # To solve this, we dynamically allocate std::vectors and then + # encapsulate them in a StdVectorSentinel responsible for + # freeing them when the associated np.ndarray is freed. + # + # Shared pointers (defined via shared_ptr) are use for safer memory management. + # Unique pointers (defined via unique_ptr) can't be used as datastructures + # are shared across threads for parallel_on_X; see _parallel_on_X_init_chunk. + shared_ptr[vector[vector[intp_t]]] neigh_indices + shared_ptr[vector[vector[float64_t]]] neigh_distances + + # Used as array of pointers to private datastructures used in threads. + vector[shared_ptr[vector[vector[intp_t]]]] neigh_indices_chunks + vector[shared_ptr[vector[vector[float64_t]]]] neigh_distances_chunks + + bint sort_results + + @final + cdef void _merge_vectors( + self, + intp_t idx, + intp_t num_threads, + ) noexcept nogil + + +cdef class EuclideanRadiusNeighbors32(RadiusNeighbors32): + """EuclideanDistance-specialisation of RadiusNeighbors32.""" + cdef: + MiddleTermComputer32 middle_term_computer + const float64_t[::1] X_norm_squared + const float64_t[::1] Y_norm_squared + + bint use_squared_distances diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/_pairwise_distances_reduction/_radius_neighbors_classmode.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/sklearn/metrics/_pairwise_distances_reduction/_radius_neighbors_classmode.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..c00e9eb5c3363d4b638f58566d83c4b504001b17 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/metrics/_pairwise_distances_reduction/_radius_neighbors_classmode.cpython-310-x86_64-linux-gnu.so differ diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/_pairwise_fast.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/sklearn/metrics/_pairwise_fast.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..24f10540915700f3eaa65e4713ad34169ecf071a Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/metrics/_pairwise_fast.cpython-310-x86_64-linux-gnu.so differ diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/_plot/__init__.py b/venv/lib/python3.10/site-packages/sklearn/metrics/_plot/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/_plot/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/metrics/_plot/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bda87a4f4301c5a67235bb8da835f297b8feb351 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/metrics/_plot/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/_plot/__pycache__/confusion_matrix.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/metrics/_plot/__pycache__/confusion_matrix.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d2163b4fa9fbd99b50f946eb6e359ae73de4e869 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/metrics/_plot/__pycache__/confusion_matrix.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/_plot/__pycache__/det_curve.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/sklearn/metrics/_plot/__pycache__/det_curve.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1ed0f305897341314dfcb8ad2300c0d3ddc3e096 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/metrics/_plot/__pycache__/det_curve.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/_plot/__pycache__/precision_recall_curve.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/metrics/_plot/__pycache__/precision_recall_curve.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f0439179e7838b1d63adcb137855401b57c83a55 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/metrics/_plot/__pycache__/precision_recall_curve.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/_plot/__pycache__/regression.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/metrics/_plot/__pycache__/regression.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e1bbd8c8f0283db65bca0ed6d84b6b765fd722e0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/metrics/_plot/__pycache__/regression.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/_plot/__pycache__/roc_curve.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/metrics/_plot/__pycache__/roc_curve.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7c04dc6d0cb9af5d503f0081c67a1ce9f77843e6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/metrics/_plot/__pycache__/roc_curve.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/_plot/confusion_matrix.py b/venv/lib/python3.10/site-packages/sklearn/metrics/_plot/confusion_matrix.py new file mode 100644 index 0000000000000000000000000000000000000000..f0bda0dc73d39e156354399e874a54e32e1db056 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/metrics/_plot/confusion_matrix.py @@ -0,0 +1,482 @@ +from itertools import product + +import numpy as np + +from ...base import is_classifier +from ...utils import check_matplotlib_support +from ...utils.multiclass import unique_labels +from .. import confusion_matrix + + +class ConfusionMatrixDisplay: + """Confusion Matrix visualization. + + It is recommend to use + :func:`~sklearn.metrics.ConfusionMatrixDisplay.from_estimator` or + :func:`~sklearn.metrics.ConfusionMatrixDisplay.from_predictions` to + create a :class:`ConfusionMatrixDisplay`. All parameters are stored as + attributes. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + confusion_matrix : ndarray of shape (n_classes, n_classes) + Confusion matrix. + + display_labels : ndarray of shape (n_classes,), default=None + Display labels for plot. If None, display labels are set from 0 to + `n_classes - 1`. + + Attributes + ---------- + im_ : matplotlib AxesImage + Image representing the confusion matrix. + + text_ : ndarray of shape (n_classes, n_classes), dtype=matplotlib Text, \ + or None + Array of matplotlib axes. `None` if `include_values` is false. + + ax_ : matplotlib Axes + Axes with confusion matrix. + + figure_ : matplotlib Figure + Figure containing the confusion matrix. + + See Also + -------- + confusion_matrix : Compute Confusion Matrix to evaluate the accuracy of a + classification. 
+ ConfusionMatrixDisplay.from_estimator : Plot the confusion matrix + given an estimator, the data, and the label. + ConfusionMatrixDisplay.from_predictions : Plot the confusion matrix + given the true and predicted labels. + + Examples + -------- + >>> import matplotlib.pyplot as plt + >>> from sklearn.datasets import make_classification + >>> from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay + >>> from sklearn.model_selection import train_test_split + >>> from sklearn.svm import SVC + >>> X, y = make_classification(random_state=0) + >>> X_train, X_test, y_train, y_test = train_test_split(X, y, + ... random_state=0) + >>> clf = SVC(random_state=0) + >>> clf.fit(X_train, y_train) + SVC(random_state=0) + >>> predictions = clf.predict(X_test) + >>> cm = confusion_matrix(y_test, predictions, labels=clf.classes_) + >>> disp = ConfusionMatrixDisplay(confusion_matrix=cm, + ... display_labels=clf.classes_) + >>> disp.plot() + <...> + >>> plt.show() + """ + + def __init__(self, confusion_matrix, *, display_labels=None): + self.confusion_matrix = confusion_matrix + self.display_labels = display_labels + + def plot( + self, + *, + include_values=True, + cmap="viridis", + xticks_rotation="horizontal", + values_format=None, + ax=None, + colorbar=True, + im_kw=None, + text_kw=None, + ): + """Plot visualization. + + Parameters + ---------- + include_values : bool, default=True + Includes values in confusion matrix. + + cmap : str or matplotlib Colormap, default='viridis' + Colormap recognized by matplotlib. + + xticks_rotation : {'vertical', 'horizontal'} or float, \ + default='horizontal' + Rotation of xtick labels. + + values_format : str, default=None + Format specification for values in confusion matrix. If `None`, + the format specification is 'd' or '.2g' whichever is shorter. + + ax : matplotlib axes, default=None + Axes object to plot on. If `None`, a new figure and axes is + created. + + colorbar : bool, default=True + Whether or not to add a colorbar to the plot. + + im_kw : dict, default=None + Dict with keywords passed to `matplotlib.pyplot.imshow` call. + + text_kw : dict, default=None + Dict with keywords passed to `matplotlib.pyplot.text` call. + + .. versionadded:: 1.2 + + Returns + ------- + display : :class:`~sklearn.metrics.ConfusionMatrixDisplay` + Returns a :class:`~sklearn.metrics.ConfusionMatrixDisplay` instance + that contains all the information to plot the confusion matrix. 
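The `cmap`, `values_format`, `colorbar`, `im_kw`, and `text_kw` options documented above are thin pass-throughs to matplotlib. A minimal usage sketch (illustrative only, not part of the vendored file; the count matrix and label names below are assumed):

import matplotlib.pyplot as plt
import numpy as np
from sklearn.metrics import ConfusionMatrixDisplay

# Assumed example data: a 2x2 count matrix and display labels.
cm = np.array([[50, 2], [5, 43]])
disp = ConfusionMatrixDisplay(confusion_matrix=cm, display_labels=["neg", "pos"])
disp.plot(
    cmap="Blues",              # any matplotlib colormap name
    values_format="d",         # force integer formatting of the cell texts
    colorbar=False,            # drop the colorbar
    im_kw={"alpha": 0.9},      # forwarded to matplotlib.pyplot.imshow
    text_kw={"fontsize": 12},  # forwarded to matplotlib.pyplot.text
)
plt.show()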
+ """ + check_matplotlib_support("ConfusionMatrixDisplay.plot") + import matplotlib.pyplot as plt + + if ax is None: + fig, ax = plt.subplots() + else: + fig = ax.figure + + cm = self.confusion_matrix + n_classes = cm.shape[0] + + default_im_kw = dict(interpolation="nearest", cmap=cmap) + im_kw = im_kw or {} + im_kw = {**default_im_kw, **im_kw} + text_kw = text_kw or {} + + self.im_ = ax.imshow(cm, **im_kw) + self.text_ = None + cmap_min, cmap_max = self.im_.cmap(0), self.im_.cmap(1.0) + + if include_values: + self.text_ = np.empty_like(cm, dtype=object) + + # print text with appropriate color depending on background + thresh = (cm.max() + cm.min()) / 2.0 + + for i, j in product(range(n_classes), range(n_classes)): + color = cmap_max if cm[i, j] < thresh else cmap_min + + if values_format is None: + text_cm = format(cm[i, j], ".2g") + if cm.dtype.kind != "f": + text_d = format(cm[i, j], "d") + if len(text_d) < len(text_cm): + text_cm = text_d + else: + text_cm = format(cm[i, j], values_format) + + default_text_kwargs = dict(ha="center", va="center", color=color) + text_kwargs = {**default_text_kwargs, **text_kw} + + self.text_[i, j] = ax.text(j, i, text_cm, **text_kwargs) + + if self.display_labels is None: + display_labels = np.arange(n_classes) + else: + display_labels = self.display_labels + if colorbar: + fig.colorbar(self.im_, ax=ax) + ax.set( + xticks=np.arange(n_classes), + yticks=np.arange(n_classes), + xticklabels=display_labels, + yticklabels=display_labels, + ylabel="True label", + xlabel="Predicted label", + ) + + ax.set_ylim((n_classes - 0.5, -0.5)) + plt.setp(ax.get_xticklabels(), rotation=xticks_rotation) + + self.figure_ = fig + self.ax_ = ax + return self + + @classmethod + def from_estimator( + cls, + estimator, + X, + y, + *, + labels=None, + sample_weight=None, + normalize=None, + display_labels=None, + include_values=True, + xticks_rotation="horizontal", + values_format=None, + cmap="viridis", + ax=None, + colorbar=True, + im_kw=None, + text_kw=None, + ): + """Plot Confusion Matrix given an estimator and some data. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 1.0 + + Parameters + ---------- + estimator : estimator instance + Fitted classifier or a fitted :class:`~sklearn.pipeline.Pipeline` + in which the last estimator is a classifier. + + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Input values. + + y : array-like of shape (n_samples,) + Target values. + + labels : array-like of shape (n_classes,), default=None + List of labels to index the confusion matrix. This may be used to + reorder or select a subset of labels. If `None` is given, those + that appear at least once in `y_true` or `y_pred` are used in + sorted order. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. + + normalize : {'true', 'pred', 'all'}, default=None + Either to normalize the counts display in the matrix: + + - if `'true'`, the confusion matrix is normalized over the true + conditions (e.g. rows); + - if `'pred'`, the confusion matrix is normalized over the + predicted conditions (e.g. columns); + - if `'all'`, the confusion matrix is normalized by the total + number of samples; + - if `None` (default), the confusion matrix will not be normalized. + + display_labels : array-like of shape (n_classes,), default=None + Target names used for plotting. By default, `labels` will be used + if it is defined, otherwise the unique labels of `y_true` and + `y_pred` will be used. 
+ + include_values : bool, default=True + Includes values in confusion matrix. + + xticks_rotation : {'vertical', 'horizontal'} or float, \ + default='horizontal' + Rotation of xtick labels. + + values_format : str, default=None + Format specification for values in confusion matrix. If `None`, the + format specification is 'd' or '.2g' whichever is shorter. + + cmap : str or matplotlib Colormap, default='viridis' + Colormap recognized by matplotlib. + + ax : matplotlib Axes, default=None + Axes object to plot on. If `None`, a new figure and axes is + created. + + colorbar : bool, default=True + Whether or not to add a colorbar to the plot. + + im_kw : dict, default=None + Dict with keywords passed to `matplotlib.pyplot.imshow` call. + + text_kw : dict, default=None + Dict with keywords passed to `matplotlib.pyplot.text` call. + + .. versionadded:: 1.2 + + Returns + ------- + display : :class:`~sklearn.metrics.ConfusionMatrixDisplay` + + See Also + -------- + ConfusionMatrixDisplay.from_predictions : Plot the confusion matrix + given the true and predicted labels. + + Examples + -------- + >>> import matplotlib.pyplot as plt + >>> from sklearn.datasets import make_classification + >>> from sklearn.metrics import ConfusionMatrixDisplay + >>> from sklearn.model_selection import train_test_split + >>> from sklearn.svm import SVC + >>> X, y = make_classification(random_state=0) + >>> X_train, X_test, y_train, y_test = train_test_split( + ... X, y, random_state=0) + >>> clf = SVC(random_state=0) + >>> clf.fit(X_train, y_train) + SVC(random_state=0) + >>> ConfusionMatrixDisplay.from_estimator( + ... clf, X_test, y_test) + <...> + >>> plt.show() + """ + method_name = f"{cls.__name__}.from_estimator" + check_matplotlib_support(method_name) + if not is_classifier(estimator): + raise ValueError(f"{method_name} only supports classifiers") + y_pred = estimator.predict(X) + + return cls.from_predictions( + y, + y_pred, + sample_weight=sample_weight, + labels=labels, + normalize=normalize, + display_labels=display_labels, + include_values=include_values, + cmap=cmap, + ax=ax, + xticks_rotation=xticks_rotation, + values_format=values_format, + colorbar=colorbar, + im_kw=im_kw, + text_kw=text_kw, + ) + + @classmethod + def from_predictions( + cls, + y_true, + y_pred, + *, + labels=None, + sample_weight=None, + normalize=None, + display_labels=None, + include_values=True, + xticks_rotation="horizontal", + values_format=None, + cmap="viridis", + ax=None, + colorbar=True, + im_kw=None, + text_kw=None, + ): + """Plot Confusion Matrix given true and predicted labels. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 1.0 + + Parameters + ---------- + y_true : array-like of shape (n_samples,) + True labels. + + y_pred : array-like of shape (n_samples,) + The predicted labels given by the method `predict` of an + classifier. + + labels : array-like of shape (n_classes,), default=None + List of labels to index the confusion matrix. This may be used to + reorder or select a subset of labels. If `None` is given, those + that appear at least once in `y_true` or `y_pred` are used in + sorted order. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. + + normalize : {'true', 'pred', 'all'}, default=None + Either to normalize the counts display in the matrix: + + - if `'true'`, the confusion matrix is normalized over the true + conditions (e.g. rows); + - if `'pred'`, the confusion matrix is normalized over the + predicted conditions (e.g. 
columns); + - if `'all'`, the confusion matrix is normalized by the total + number of samples; + - if `None` (default), the confusion matrix will not be normalized. + + display_labels : array-like of shape (n_classes,), default=None + Target names used for plotting. By default, `labels` will be used + if it is defined, otherwise the unique labels of `y_true` and + `y_pred` will be used. + + include_values : bool, default=True + Includes values in confusion matrix. + + xticks_rotation : {'vertical', 'horizontal'} or float, \ + default='horizontal' + Rotation of xtick labels. + + values_format : str, default=None + Format specification for values in confusion matrix. If `None`, the + format specification is 'd' or '.2g' whichever is shorter. + + cmap : str or matplotlib Colormap, default='viridis' + Colormap recognized by matplotlib. + + ax : matplotlib Axes, default=None + Axes object to plot on. If `None`, a new figure and axes is + created. + + colorbar : bool, default=True + Whether or not to add a colorbar to the plot. + + im_kw : dict, default=None + Dict with keywords passed to `matplotlib.pyplot.imshow` call. + + text_kw : dict, default=None + Dict with keywords passed to `matplotlib.pyplot.text` call. + + .. versionadded:: 1.2 + + Returns + ------- + display : :class:`~sklearn.metrics.ConfusionMatrixDisplay` + + See Also + -------- + ConfusionMatrixDisplay.from_estimator : Plot the confusion matrix + given an estimator, the data, and the label. + + Examples + -------- + >>> import matplotlib.pyplot as plt + >>> from sklearn.datasets import make_classification + >>> from sklearn.metrics import ConfusionMatrixDisplay + >>> from sklearn.model_selection import train_test_split + >>> from sklearn.svm import SVC + >>> X, y = make_classification(random_state=0) + >>> X_train, X_test, y_train, y_test = train_test_split( + ... X, y, random_state=0) + >>> clf = SVC(random_state=0) + >>> clf.fit(X_train, y_train) + SVC(random_state=0) + >>> y_pred = clf.predict(X_test) + >>> ConfusionMatrixDisplay.from_predictions( + ... y_test, y_pred) + <...> + >>> plt.show() + """ + check_matplotlib_support(f"{cls.__name__}.from_predictions") + + if display_labels is None: + if labels is None: + display_labels = unique_labels(y_true, y_pred) + else: + display_labels = labels + + cm = confusion_matrix( + y_true, + y_pred, + sample_weight=sample_weight, + labels=labels, + normalize=normalize, + ) + + disp = cls(confusion_matrix=cm, display_labels=display_labels) + + return disp.plot( + include_values=include_values, + cmap=cmap, + ax=ax, + xticks_rotation=xticks_rotation, + values_format=values_format, + colorbar=colorbar, + im_kw=im_kw, + text_kw=text_kw, + ) diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/_plot/det_curve.py b/venv/lib/python3.10/site-packages/sklearn/metrics/_plot/det_curve.py new file mode 100644 index 0000000000000000000000000000000000000000..e7336b10f5bb62d1b6e8ce9181c5b55487e6ba57 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/metrics/_plot/det_curve.py @@ -0,0 +1,332 @@ +import scipy as sp + +from ...utils._plotting import _BinaryClassifierCurveDisplayMixin +from .._ranking import det_curve + + +class DetCurveDisplay(_BinaryClassifierCurveDisplayMixin): + """DET curve visualization. + + It is recommend to use :func:`~sklearn.metrics.DetCurveDisplay.from_estimator` + or :func:`~sklearn.metrics.DetCurveDisplay.from_predictions` to create a + visualizer. All parameters are stored as attributes. + + Read more in the :ref:`User Guide `. + + .. 
versionadded:: 0.24 + + Parameters + ---------- + fpr : ndarray + False positive rate. + + fnr : ndarray + False negative rate. + + estimator_name : str, default=None + Name of estimator. If None, the estimator name is not shown. + + pos_label : int, float, bool or str, default=None + The label of the positive class. + + Attributes + ---------- + line_ : matplotlib Artist + DET Curve. + + ax_ : matplotlib Axes + Axes with DET Curve. + + figure_ : matplotlib Figure + Figure containing the curve. + + See Also + -------- + det_curve : Compute error rates for different probability thresholds. + DetCurveDisplay.from_estimator : Plot DET curve given an estimator and + some data. + DetCurveDisplay.from_predictions : Plot DET curve given the true and + predicted labels. + + Examples + -------- + >>> import matplotlib.pyplot as plt + >>> from sklearn.datasets import make_classification + >>> from sklearn.metrics import det_curve, DetCurveDisplay + >>> from sklearn.model_selection import train_test_split + >>> from sklearn.svm import SVC + >>> X, y = make_classification(n_samples=1000, random_state=0) + >>> X_train, X_test, y_train, y_test = train_test_split( + ... X, y, test_size=0.4, random_state=0) + >>> clf = SVC(random_state=0).fit(X_train, y_train) + >>> y_pred = clf.decision_function(X_test) + >>> fpr, fnr, _ = det_curve(y_test, y_pred) + >>> display = DetCurveDisplay( + ... fpr=fpr, fnr=fnr, estimator_name="SVC" + ... ) + >>> display.plot() + <...> + >>> plt.show() + """ + + def __init__(self, *, fpr, fnr, estimator_name=None, pos_label=None): + self.fpr = fpr + self.fnr = fnr + self.estimator_name = estimator_name + self.pos_label = pos_label + + @classmethod + def from_estimator( + cls, + estimator, + X, + y, + *, + sample_weight=None, + response_method="auto", + pos_label=None, + name=None, + ax=None, + **kwargs, + ): + """Plot DET curve given an estimator and data. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 1.0 + + Parameters + ---------- + estimator : estimator instance + Fitted classifier or a fitted :class:`~sklearn.pipeline.Pipeline` + in which the last estimator is a classifier. + + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Input values. + + y : array-like of shape (n_samples,) + Target values. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. + + response_method : {'predict_proba', 'decision_function', 'auto'} \ + default='auto' + Specifies whether to use :term:`predict_proba` or + :term:`decision_function` as the predicted target response. If set + to 'auto', :term:`predict_proba` is tried first and if it does not + exist :term:`decision_function` is tried next. + + pos_label : int, float, bool or str, default=None + The label of the positive class. When `pos_label=None`, if `y_true` + is in {-1, 1} or {0, 1}, `pos_label` is set to 1, otherwise an + error will be raised. + + name : str, default=None + Name of DET curve for labeling. If `None`, use the name of the + estimator. + + ax : matplotlib axes, default=None + Axes object to plot on. If `None`, a new figure and axes is + created. + + **kwargs : dict + Additional keywords arguments passed to matplotlib `plot` function. + + Returns + ------- + display : :class:`~sklearn.metrics.DetCurveDisplay` + Object that stores computed values. + + See Also + -------- + det_curve : Compute error rates for different probability thresholds. + DetCurveDisplay.from_predictions : Plot DET curve given the true and + predicted labels. 
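A short sketch of the estimator path documented above, assuming a fitted binary classifier (illustrative only, not part of the vendored file):

import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import DetCurveDisplay
from sklearn.model_selection import train_test_split

X, y = make_classification(n_samples=1000, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LogisticRegression().fit(X_train, y_train)

# response_method="auto" tries predict_proba first, then decision_function.
DetCurveDisplay.from_estimator(
    clf, X_test, y_test, response_method="auto", name="LogReg"
)
plt.show()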
+ + Examples + -------- + >>> import matplotlib.pyplot as plt + >>> from sklearn.datasets import make_classification + >>> from sklearn.metrics import DetCurveDisplay + >>> from sklearn.model_selection import train_test_split + >>> from sklearn.svm import SVC + >>> X, y = make_classification(n_samples=1000, random_state=0) + >>> X_train, X_test, y_train, y_test = train_test_split( + ... X, y, test_size=0.4, random_state=0) + >>> clf = SVC(random_state=0).fit(X_train, y_train) + >>> DetCurveDisplay.from_estimator( + ... clf, X_test, y_test) + <...> + >>> plt.show() + """ + y_pred, pos_label, name = cls._validate_and_get_response_values( + estimator, + X, + y, + response_method=response_method, + pos_label=pos_label, + name=name, + ) + + return cls.from_predictions( + y_true=y, + y_pred=y_pred, + sample_weight=sample_weight, + name=name, + ax=ax, + pos_label=pos_label, + **kwargs, + ) + + @classmethod + def from_predictions( + cls, + y_true, + y_pred, + *, + sample_weight=None, + pos_label=None, + name=None, + ax=None, + **kwargs, + ): + """Plot the DET curve given the true and predicted labels. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 1.0 + + Parameters + ---------- + y_true : array-like of shape (n_samples,) + True labels. + + y_pred : array-like of shape (n_samples,) + Target scores, can either be probability estimates of the positive + class, confidence values, or non-thresholded measure of decisions + (as returned by `decision_function` on some classifiers). + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. + + pos_label : int, float, bool or str, default=None + The label of the positive class. When `pos_label=None`, if `y_true` + is in {-1, 1} or {0, 1}, `pos_label` is set to 1, otherwise an + error will be raised. + + name : str, default=None + Name of DET curve for labeling. If `None`, name will be set to + `"Classifier"`. + + ax : matplotlib axes, default=None + Axes object to plot on. If `None`, a new figure and axes is + created. + + **kwargs : dict + Additional keywords arguments passed to matplotlib `plot` function. + + Returns + ------- + display : :class:`~sklearn.metrics.DetCurveDisplay` + Object that stores computed values. + + See Also + -------- + det_curve : Compute error rates for different probability thresholds. + DetCurveDisplay.from_estimator : Plot DET curve given an estimator and + some data. + + Examples + -------- + >>> import matplotlib.pyplot as plt + >>> from sklearn.datasets import make_classification + >>> from sklearn.metrics import DetCurveDisplay + >>> from sklearn.model_selection import train_test_split + >>> from sklearn.svm import SVC + >>> X, y = make_classification(n_samples=1000, random_state=0) + >>> X_train, X_test, y_train, y_test = train_test_split( + ... X, y, test_size=0.4, random_state=0) + >>> clf = SVC(random_state=0).fit(X_train, y_train) + >>> y_pred = clf.decision_function(X_test) + >>> DetCurveDisplay.from_predictions( + ... y_test, y_pred) + <...> + >>> plt.show() + """ + pos_label_validated, name = cls._validate_from_predictions_params( + y_true, y_pred, sample_weight=sample_weight, pos_label=pos_label, name=name + ) + + fpr, fnr, _ = det_curve( + y_true, + y_pred, + pos_label=pos_label, + sample_weight=sample_weight, + ) + + viz = cls( + fpr=fpr, + fnr=fnr, + estimator_name=name, + pos_label=pos_label_validated, + ) + + return viz.plot(ax=ax, name=name, **kwargs) + + def plot(self, ax=None, *, name=None, **kwargs): + """Plot visualization. 
+ + Parameters + ---------- + ax : matplotlib axes, default=None + Axes object to plot on. If `None`, a new figure and axes is + created. + + name : str, default=None + Name of DET curve for labeling. If `None`, use `estimator_name` if + it is not `None`, otherwise no labeling is shown. + + **kwargs : dict + Additional keywords arguments passed to matplotlib `plot` function. + + Returns + ------- + display : :class:`~sklearn.metrics.DetCurveDisplay` + Object that stores computed values. + """ + self.ax_, self.figure_, name = self._validate_plot_params(ax=ax, name=name) + + line_kwargs = {} if name is None else {"label": name} + line_kwargs.update(**kwargs) + + (self.line_,) = self.ax_.plot( + sp.stats.norm.ppf(self.fpr), + sp.stats.norm.ppf(self.fnr), + **line_kwargs, + ) + info_pos_label = ( + f" (Positive label: {self.pos_label})" if self.pos_label is not None else "" + ) + + xlabel = "False Positive Rate" + info_pos_label + ylabel = "False Negative Rate" + info_pos_label + self.ax_.set(xlabel=xlabel, ylabel=ylabel) + + if "label" in line_kwargs: + self.ax_.legend(loc="lower right") + + ticks = [0.001, 0.01, 0.05, 0.20, 0.5, 0.80, 0.95, 0.99, 0.999] + tick_locations = sp.stats.norm.ppf(ticks) + tick_labels = [ + "{:.0%}".format(s) if (100 * s).is_integer() else "{:.1%}".format(s) + for s in ticks + ] + self.ax_.set_xticks(tick_locations) + self.ax_.set_xticklabels(tick_labels) + self.ax_.set_xlim(-3, 3) + self.ax_.set_yticks(tick_locations) + self.ax_.set_yticklabels(tick_labels) + self.ax_.set_ylim(-3, 3) + + return self diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/_plot/precision_recall_curve.py b/venv/lib/python3.10/site-packages/sklearn/metrics/_plot/precision_recall_curve.py new file mode 100644 index 0000000000000000000000000000000000000000..852dbf3981b2cdea0c2bd718a83e968036366c5c --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/metrics/_plot/precision_recall_curve.py @@ -0,0 +1,504 @@ +from collections import Counter + +from ...utils._plotting import _BinaryClassifierCurveDisplayMixin +from .._ranking import average_precision_score, precision_recall_curve + + +class PrecisionRecallDisplay(_BinaryClassifierCurveDisplayMixin): + """Precision Recall visualization. + + It is recommend to use + :func:`~sklearn.metrics.PrecisionRecallDisplay.from_estimator` or + :func:`~sklearn.metrics.PrecisionRecallDisplay.from_predictions` to create + a :class:`~sklearn.metrics.PrecisionRecallDisplay`. All parameters are + stored as attributes. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + precision : ndarray + Precision values. + + recall : ndarray + Recall values. + + average_precision : float, default=None + Average precision. If None, the average precision is not shown. + + estimator_name : str, default=None + Name of estimator. If None, then the estimator name is not shown. + + pos_label : int, float, bool or str, default=None + The class considered as the positive class. If None, the class will not + be shown in the legend. + + .. versionadded:: 0.24 + + prevalence_pos_label : float, default=None + The prevalence of the positive label. It is used for plotting the + chance level line. If None, the chance level line will not be plotted + even if `plot_chance_level` is set to True when plotting. + + .. versionadded:: 1.3 + + Attributes + ---------- + line_ : matplotlib Artist + Precision recall curve. + + chance_level_ : matplotlib Artist or None + The chance level line. It is `None` if the chance level is not plotted. + + .. 
versionadded:: 1.3 + + ax_ : matplotlib Axes + Axes with precision recall curve. + + figure_ : matplotlib Figure + Figure containing the curve. + + See Also + -------- + precision_recall_curve : Compute precision-recall pairs for different + probability thresholds. + PrecisionRecallDisplay.from_estimator : Plot Precision Recall Curve given + a binary classifier. + PrecisionRecallDisplay.from_predictions : Plot Precision Recall Curve + using predictions from a binary classifier. + + Notes + ----- + The average precision (cf. :func:`~sklearn.metrics.average_precision_score`) in + scikit-learn is computed without any interpolation. To be consistent with + this metric, the precision-recall curve is plotted without any + interpolation as well (step-wise style). + + You can change this style by passing the keyword argument + `drawstyle="default"` in :meth:`plot`, :meth:`from_estimator`, or + :meth:`from_predictions`. However, the curve will not be strictly + consistent with the reported average precision. + + Examples + -------- + >>> import matplotlib.pyplot as plt + >>> from sklearn.datasets import make_classification + >>> from sklearn.metrics import (precision_recall_curve, + ... PrecisionRecallDisplay) + >>> from sklearn.model_selection import train_test_split + >>> from sklearn.svm import SVC + >>> X, y = make_classification(random_state=0) + >>> X_train, X_test, y_train, y_test = train_test_split(X, y, + ... random_state=0) + >>> clf = SVC(random_state=0) + >>> clf.fit(X_train, y_train) + SVC(random_state=0) + >>> predictions = clf.predict(X_test) + >>> precision, recall, _ = precision_recall_curve(y_test, predictions) + >>> disp = PrecisionRecallDisplay(precision=precision, recall=recall) + >>> disp.plot() + <...> + >>> plt.show() + """ + + def __init__( + self, + precision, + recall, + *, + average_precision=None, + estimator_name=None, + pos_label=None, + prevalence_pos_label=None, + ): + self.estimator_name = estimator_name + self.precision = precision + self.recall = recall + self.average_precision = average_precision + self.pos_label = pos_label + self.prevalence_pos_label = prevalence_pos_label + + def plot( + self, + ax=None, + *, + name=None, + plot_chance_level=False, + chance_level_kw=None, + **kwargs, + ): + """Plot visualization. + + Extra keyword arguments will be passed to matplotlib's `plot`. + + Parameters + ---------- + ax : Matplotlib Axes, default=None + Axes object to plot on. If `None`, a new figure and axes is + created. + + name : str, default=None + Name of precision recall curve for labeling. If `None`, use + `estimator_name` if not `None`, otherwise no labeling is shown. + + plot_chance_level : bool, default=False + Whether to plot the chance level. The chance level is the prevalence + of the positive label computed from the data passed during + :meth:`from_estimator` or :meth:`from_predictions` call. + + .. versionadded:: 1.3 + + chance_level_kw : dict, default=None + Keyword arguments to be passed to matplotlib's `plot` for rendering + the chance level line. + + .. versionadded:: 1.3 + + **kwargs : dict + Keyword arguments to be passed to matplotlib's `plot`. + + Returns + ------- + display : :class:`~sklearn.metrics.PrecisionRecallDisplay` + Object that stores computed values. + + Notes + ----- + The average precision (cf. :func:`~sklearn.metrics.average_precision_score`) + in scikit-learn is computed without any interpolation. To be consistent + with this metric, the precision-recall curve is plotted without any + interpolation as well (step-wise style). 
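For instance, a tiny sketch of switching off the step-wise rendering on a display object (the precision/recall arrays are assumed for illustration):

import matplotlib.pyplot as plt
from sklearn.metrics import PrecisionRecallDisplay

# Assumed precision/recall values for illustration.
disp = PrecisionRecallDisplay(precision=[1.0, 0.8, 0.5], recall=[0.0, 0.5, 1.0])
disp.plot(drawstyle="default")  # linear segments instead of the default "steps-post"
plt.show()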
+ + You can change this style by passing the keyword argument + `drawstyle="default"`. However, the curve will not be strictly + consistent with the reported average precision. + """ + self.ax_, self.figure_, name = self._validate_plot_params(ax=ax, name=name) + + line_kwargs = {"drawstyle": "steps-post"} + if self.average_precision is not None and name is not None: + line_kwargs["label"] = f"{name} (AP = {self.average_precision:0.2f})" + elif self.average_precision is not None: + line_kwargs["label"] = f"AP = {self.average_precision:0.2f}" + elif name is not None: + line_kwargs["label"] = name + line_kwargs.update(**kwargs) + + (self.line_,) = self.ax_.plot(self.recall, self.precision, **line_kwargs) + + info_pos_label = ( + f" (Positive label: {self.pos_label})" if self.pos_label is not None else "" + ) + + xlabel = "Recall" + info_pos_label + ylabel = "Precision" + info_pos_label + self.ax_.set( + xlabel=xlabel, + xlim=(-0.01, 1.01), + ylabel=ylabel, + ylim=(-0.01, 1.01), + aspect="equal", + ) + + if plot_chance_level: + if self.prevalence_pos_label is None: + raise ValueError( + "You must provide prevalence_pos_label when constructing the " + "PrecisionRecallDisplay object in order to plot the chance " + "level line. Alternatively, you may use " + "PrecisionRecallDisplay.from_estimator or " + "PrecisionRecallDisplay.from_predictions " + "to automatically set prevalence_pos_label" + ) + + chance_level_line_kw = { + "label": f"Chance level (AP = {self.prevalence_pos_label:0.2f})", + "color": "k", + "linestyle": "--", + } + if chance_level_kw is not None: + chance_level_line_kw.update(chance_level_kw) + + (self.chance_level_,) = self.ax_.plot( + (0, 1), + (self.prevalence_pos_label, self.prevalence_pos_label), + **chance_level_line_kw, + ) + else: + self.chance_level_ = None + + if "label" in line_kwargs or plot_chance_level: + self.ax_.legend(loc="lower left") + + return self + + @classmethod + def from_estimator( + cls, + estimator, + X, + y, + *, + sample_weight=None, + pos_label=None, + drop_intermediate=False, + response_method="auto", + name=None, + ax=None, + plot_chance_level=False, + chance_level_kw=None, + **kwargs, + ): + """Plot precision-recall curve given an estimator and some data. + + Parameters + ---------- + estimator : estimator instance + Fitted classifier or a fitted :class:`~sklearn.pipeline.Pipeline` + in which the last estimator is a classifier. + + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Input values. + + y : array-like of shape (n_samples,) + Target values. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. + + pos_label : int, float, bool or str, default=None + The class considered as the positive class when computing the + precision and recall metrics. By default, `estimators.classes_[1]` + is considered as the positive class. + + drop_intermediate : bool, default=False + Whether to drop some suboptimal thresholds which would not appear + on a plotted precision-recall curve. This is useful in order to + create lighter precision-recall curves. + + .. versionadded:: 1.3 + + response_method : {'predict_proba', 'decision_function', 'auto'}, \ + default='auto' + Specifies whether to use :term:`predict_proba` or + :term:`decision_function` as the target response. If set to 'auto', + :term:`predict_proba` is tried first and if it does not exist + :term:`decision_function` is tried next. + + name : str, default=None + Name for labeling curve. If `None`, no name is used. 
+ + ax : matplotlib axes, default=None + Axes object to plot on. If `None`, a new figure and axes is created. + + plot_chance_level : bool, default=False + Whether to plot the chance level. The chance level is the prevalence + of the positive label computed from the data passed during + :meth:`from_estimator` or :meth:`from_predictions` call. + + .. versionadded:: 1.3 + + chance_level_kw : dict, default=None + Keyword arguments to be passed to matplotlib's `plot` for rendering + the chance level line. + + .. versionadded:: 1.3 + + **kwargs : dict + Keyword arguments to be passed to matplotlib's `plot`. + + Returns + ------- + display : :class:`~sklearn.metrics.PrecisionRecallDisplay` + + See Also + -------- + PrecisionRecallDisplay.from_predictions : Plot precision-recall curve + using estimated probabilities or output of decision function. + + Notes + ----- + The average precision (cf. :func:`~sklearn.metrics.average_precision_score`) + in scikit-learn is computed without any interpolation. To be consistent + with this metric, the precision-recall curve is plotted without any + interpolation as well (step-wise style). + + You can change this style by passing the keyword argument + `drawstyle="default"`. However, the curve will not be strictly + consistent with the reported average precision. + + Examples + -------- + >>> import matplotlib.pyplot as plt + >>> from sklearn.datasets import make_classification + >>> from sklearn.metrics import PrecisionRecallDisplay + >>> from sklearn.model_selection import train_test_split + >>> from sklearn.linear_model import LogisticRegression + >>> X, y = make_classification(random_state=0) + >>> X_train, X_test, y_train, y_test = train_test_split( + ... X, y, random_state=0) + >>> clf = LogisticRegression() + >>> clf.fit(X_train, y_train) + LogisticRegression() + >>> PrecisionRecallDisplay.from_estimator( + ... clf, X_test, y_test) + <...> + >>> plt.show() + """ + y_pred, pos_label, name = cls._validate_and_get_response_values( + estimator, + X, + y, + response_method=response_method, + pos_label=pos_label, + name=name, + ) + + return cls.from_predictions( + y, + y_pred, + sample_weight=sample_weight, + name=name, + pos_label=pos_label, + drop_intermediate=drop_intermediate, + ax=ax, + plot_chance_level=plot_chance_level, + chance_level_kw=chance_level_kw, + **kwargs, + ) + + @classmethod + def from_predictions( + cls, + y_true, + y_pred, + *, + sample_weight=None, + pos_label=None, + drop_intermediate=False, + name=None, + ax=None, + plot_chance_level=False, + chance_level_kw=None, + **kwargs, + ): + """Plot precision-recall curve given binary class predictions. + + Parameters + ---------- + y_true : array-like of shape (n_samples,) + True binary labels. + + y_pred : array-like of shape (n_samples,) + Estimated probabilities or output of decision function. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. + + pos_label : int, float, bool or str, default=None + The class considered as the positive class when computing the + precision and recall metrics. + + drop_intermediate : bool, default=False + Whether to drop some suboptimal thresholds which would not appear + on a plotted precision-recall curve. This is useful in order to + create lighter precision-recall curves. + + .. versionadded:: 1.3 + + name : str, default=None + Name for labeling curve. If `None`, name will be set to + `"Classifier"`. + + ax : matplotlib axes, default=None + Axes object to plot on. If `None`, a new figure and axes is created. 
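Putting the `from_estimator` parameters above together, a hedged sketch with the chance-level line enabled (illustrative only; `plot_chance_level` requires scikit-learn >= 1.3):

import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import PrecisionRecallDisplay
from sklearn.model_selection import train_test_split

# Assumed imbalanced toy data so the chance level is visibly below 0.5.
X, y = make_classification(weights=[0.7, 0.3], random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LogisticRegression().fit(X_train, y_train)

# The chance level drawn here is the prevalence of the positive class in y_test.
PrecisionRecallDisplay.from_estimator(
    clf, X_test, y_test, plot_chance_level=True, name="LogReg"
)
plt.show()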
+ + plot_chance_level : bool, default=False + Whether to plot the chance level. The chance level is the prevalence + of the positive label computed from the data passed during + :meth:`from_estimator` or :meth:`from_predictions` call. + + .. versionadded:: 1.3 + + chance_level_kw : dict, default=None + Keyword arguments to be passed to matplotlib's `plot` for rendering + the chance level line. + + .. versionadded:: 1.3 + + **kwargs : dict + Keyword arguments to be passed to matplotlib's `plot`. + + Returns + ------- + display : :class:`~sklearn.metrics.PrecisionRecallDisplay` + + See Also + -------- + PrecisionRecallDisplay.from_estimator : Plot precision-recall curve + using an estimator. + + Notes + ----- + The average precision (cf. :func:`~sklearn.metrics.average_precision_score`) + in scikit-learn is computed without any interpolation. To be consistent + with this metric, the precision-recall curve is plotted without any + interpolation as well (step-wise style). + + You can change this style by passing the keyword argument + `drawstyle="default"`. However, the curve will not be strictly + consistent with the reported average precision. + + Examples + -------- + >>> import matplotlib.pyplot as plt + >>> from sklearn.datasets import make_classification + >>> from sklearn.metrics import PrecisionRecallDisplay + >>> from sklearn.model_selection import train_test_split + >>> from sklearn.linear_model import LogisticRegression + >>> X, y = make_classification(random_state=0) + >>> X_train, X_test, y_train, y_test = train_test_split( + ... X, y, random_state=0) + >>> clf = LogisticRegression() + >>> clf.fit(X_train, y_train) + LogisticRegression() + >>> y_pred = clf.predict_proba(X_test)[:, 1] + >>> PrecisionRecallDisplay.from_predictions( + ... y_test, y_pred) + <...> + >>> plt.show() + """ + pos_label, name = cls._validate_from_predictions_params( + y_true, y_pred, sample_weight=sample_weight, pos_label=pos_label, name=name + ) + + precision, recall, _ = precision_recall_curve( + y_true, + y_pred, + pos_label=pos_label, + sample_weight=sample_weight, + drop_intermediate=drop_intermediate, + ) + average_precision = average_precision_score( + y_true, y_pred, pos_label=pos_label, sample_weight=sample_weight + ) + + class_count = Counter(y_true) + prevalence_pos_label = class_count[pos_label] / sum(class_count.values()) + + viz = cls( + precision=precision, + recall=recall, + average_precision=average_precision, + estimator_name=name, + pos_label=pos_label, + prevalence_pos_label=prevalence_pos_label, + ) + + return viz.plot( + ax=ax, + name=name, + plot_chance_level=plot_chance_level, + chance_level_kw=chance_level_kw, + **kwargs, + ) diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/_plot/regression.py b/venv/lib/python3.10/site-packages/sklearn/metrics/_plot/regression.py new file mode 100644 index 0000000000000000000000000000000000000000..393a9524e2af4c9f41e0099a2558ed93b70b368d --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/metrics/_plot/regression.py @@ -0,0 +1,405 @@ +import numbers + +import numpy as np + +from ...utils import _safe_indexing, check_matplotlib_support, check_random_state + + +class PredictionErrorDisplay: + """Visualization of the prediction error of a regression model. + + This tool can display "residuals vs predicted" or "actual vs predicted" + using scatter plots to qualitatively assess the behavior of a regressor, + preferably on held-out data points. 
+ + See the details in the docstrings of + :func:`~sklearn.metrics.PredictionErrorDisplay.from_estimator` or + :func:`~sklearn.metrics.PredictionErrorDisplay.from_predictions` to + create a visualizer. All parameters are stored as attributes. + + For general information regarding `scikit-learn` visualization tools, read + more in the :ref:`Visualization Guide `. + For details regarding interpreting these plots, refer to the + :ref:`Model Evaluation Guide `. + + .. versionadded:: 1.2 + + Parameters + ---------- + y_true : ndarray of shape (n_samples,) + True values. + + y_pred : ndarray of shape (n_samples,) + Prediction values. + + Attributes + ---------- + line_ : matplotlib Artist + Optimal line representing `y_true == y_pred`. Therefore, it is a + diagonal line for `kind="predictions"` and a horizontal line for + `kind="residuals"`. + + errors_lines_ : matplotlib Artist or None + Residual lines. If `with_errors=False`, then it is set to `None`. + + scatter_ : matplotlib Artist + Scatter data points. + + ax_ : matplotlib Axes + Axes with the different matplotlib axis. + + figure_ : matplotlib Figure + Figure containing the scatter and lines. + + See Also + -------- + PredictionErrorDisplay.from_estimator : Prediction error visualization + given an estimator and some data. + PredictionErrorDisplay.from_predictions : Prediction error visualization + given the true and predicted targets. + + Examples + -------- + >>> import matplotlib.pyplot as plt + >>> from sklearn.datasets import load_diabetes + >>> from sklearn.linear_model import Ridge + >>> from sklearn.metrics import PredictionErrorDisplay + >>> X, y = load_diabetes(return_X_y=True) + >>> ridge = Ridge().fit(X, y) + >>> y_pred = ridge.predict(X) + >>> display = PredictionErrorDisplay(y_true=y, y_pred=y_pred) + >>> display.plot() + <...> + >>> plt.show() + """ + + def __init__(self, *, y_true, y_pred): + self.y_true = y_true + self.y_pred = y_pred + + def plot( + self, + ax=None, + *, + kind="residual_vs_predicted", + scatter_kwargs=None, + line_kwargs=None, + ): + """Plot visualization. + + Extra keyword arguments will be passed to matplotlib's ``plot``. + + Parameters + ---------- + ax : matplotlib axes, default=None + Axes object to plot on. If `None`, a new figure and axes is + created. + + kind : {"actual_vs_predicted", "residual_vs_predicted"}, \ + default="residual_vs_predicted" + The type of plot to draw: + + - "actual_vs_predicted" draws the observed values (y-axis) vs. + the predicted values (x-axis). + - "residual_vs_predicted" draws the residuals, i.e. difference + between observed and predicted values, (y-axis) vs. the predicted + values (x-axis). + + scatter_kwargs : dict, default=None + Dictionary with keywords passed to the `matplotlib.pyplot.scatter` + call. + + line_kwargs : dict, default=None + Dictionary with keyword passed to the `matplotlib.pyplot.plot` + call to draw the optimal line. + + Returns + ------- + display : :class:`~sklearn.metrics.PredictionErrorDisplay` + + Object that stores computed values. + """ + check_matplotlib_support(f"{self.__class__.__name__}.plot") + + expected_kind = ("actual_vs_predicted", "residual_vs_predicted") + if kind not in expected_kind: + raise ValueError( + f"`kind` must be one of {', '.join(expected_kind)}. " + f"Got {kind!r} instead." 
+ ) + + import matplotlib.pyplot as plt + + if scatter_kwargs is None: + scatter_kwargs = {} + if line_kwargs is None: + line_kwargs = {} + + default_scatter_kwargs = {"color": "tab:blue", "alpha": 0.8} + default_line_kwargs = {"color": "black", "alpha": 0.7, "linestyle": "--"} + + scatter_kwargs = {**default_scatter_kwargs, **scatter_kwargs} + line_kwargs = {**default_line_kwargs, **line_kwargs} + + if ax is None: + _, ax = plt.subplots() + + if kind == "actual_vs_predicted": + max_value = max(np.max(self.y_true), np.max(self.y_pred)) + min_value = min(np.min(self.y_true), np.min(self.y_pred)) + self.line_ = ax.plot( + [min_value, max_value], [min_value, max_value], **line_kwargs + )[0] + + x_data, y_data = self.y_pred, self.y_true + xlabel, ylabel = "Predicted values", "Actual values" + + self.scatter_ = ax.scatter(x_data, y_data, **scatter_kwargs) + + # force to have a squared axis + ax.set_aspect("equal", adjustable="datalim") + ax.set_xticks(np.linspace(min_value, max_value, num=5)) + ax.set_yticks(np.linspace(min_value, max_value, num=5)) + else: # kind == "residual_vs_predicted" + self.line_ = ax.plot( + [np.min(self.y_pred), np.max(self.y_pred)], + [0, 0], + **line_kwargs, + )[0] + self.scatter_ = ax.scatter( + self.y_pred, self.y_true - self.y_pred, **scatter_kwargs + ) + xlabel, ylabel = "Predicted values", "Residuals (actual - predicted)" + + ax.set(xlabel=xlabel, ylabel=ylabel) + + self.ax_ = ax + self.figure_ = ax.figure + + return self + + @classmethod + def from_estimator( + cls, + estimator, + X, + y, + *, + kind="residual_vs_predicted", + subsample=1_000, + random_state=None, + ax=None, + scatter_kwargs=None, + line_kwargs=None, + ): + """Plot the prediction error given a regressor and some data. + + For general information regarding `scikit-learn` visualization tools, + read more in the :ref:`Visualization Guide `. + For details regarding interpreting these plots, refer to the + :ref:`Model Evaluation Guide `. + + .. versionadded:: 1.2 + + Parameters + ---------- + estimator : estimator instance + Fitted regressor or a fitted :class:`~sklearn.pipeline.Pipeline` + in which the last estimator is a regressor. + + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Input values. + + y : array-like of shape (n_samples,) + Target values. + + kind : {"actual_vs_predicted", "residual_vs_predicted"}, \ + default="residual_vs_predicted" + The type of plot to draw: + + - "actual_vs_predicted" draws the observed values (y-axis) vs. + the predicted values (x-axis). + - "residual_vs_predicted" draws the residuals, i.e. difference + between observed and predicted values, (y-axis) vs. the predicted + values (x-axis). + + subsample : float, int or None, default=1_000 + Sampling the samples to be shown on the scatter plot. If `float`, + it should be between 0 and 1 and represents the proportion of the + original dataset. If `int`, it represents the number of samples + display on the scatter plot. If `None`, no subsampling will be + applied. by default, 1000 samples or less will be displayed. + + random_state : int or RandomState, default=None + Controls the randomness when `subsample` is not `None`. + See :term:`Glossary ` for details. + + ax : matplotlib axes, default=None + Axes object to plot on. If `None`, a new figure and axes is + created. + + scatter_kwargs : dict, default=None + Dictionary with keywords passed to the `matplotlib.pyplot.scatter` + call. 
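The two `kind` modes implemented above share the same scatter; only the reference line and the y-axis change. A small sketch building the display directly from arrays (illustrative, not part of the vendored file; the toy targets below are assumed):

import matplotlib.pyplot as plt
import numpy as np
from sklearn.metrics import PredictionErrorDisplay

rng = np.random.default_rng(0)
y_true = rng.normal(size=200)
y_pred = y_true + rng.normal(scale=0.3, size=200)  # assumed noisy predictions

disp = PredictionErrorDisplay(y_true=y_true, y_pred=y_pred)
disp.plot(kind="actual_vs_predicted")    # diagonal y_true == y_pred reference line
disp.plot(kind="residual_vs_predicted")  # horizontal zero-residual reference line
plt.show()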
+ + line_kwargs : dict, default=None + Dictionary with keyword passed to the `matplotlib.pyplot.plot` + call to draw the optimal line. + + Returns + ------- + display : :class:`~sklearn.metrics.PredictionErrorDisplay` + Object that stores the computed values. + + See Also + -------- + PredictionErrorDisplay : Prediction error visualization for regression. + PredictionErrorDisplay.from_predictions : Prediction error visualization + given the true and predicted targets. + + Examples + -------- + >>> import matplotlib.pyplot as plt + >>> from sklearn.datasets import load_diabetes + >>> from sklearn.linear_model import Ridge + >>> from sklearn.metrics import PredictionErrorDisplay + >>> X, y = load_diabetes(return_X_y=True) + >>> ridge = Ridge().fit(X, y) + >>> disp = PredictionErrorDisplay.from_estimator(ridge, X, y) + >>> plt.show() + """ + check_matplotlib_support(f"{cls.__name__}.from_estimator") + + y_pred = estimator.predict(X) + + return cls.from_predictions( + y_true=y, + y_pred=y_pred, + kind=kind, + subsample=subsample, + random_state=random_state, + ax=ax, + scatter_kwargs=scatter_kwargs, + line_kwargs=line_kwargs, + ) + + @classmethod + def from_predictions( + cls, + y_true, + y_pred, + *, + kind="residual_vs_predicted", + subsample=1_000, + random_state=None, + ax=None, + scatter_kwargs=None, + line_kwargs=None, + ): + """Plot the prediction error given the true and predicted targets. + + For general information regarding `scikit-learn` visualization tools, + read more in the :ref:`Visualization Guide `. + For details regarding interpreting these plots, refer to the + :ref:`Model Evaluation Guide `. + + .. versionadded:: 1.2 + + Parameters + ---------- + y_true : array-like of shape (n_samples,) + True target values. + + y_pred : array-like of shape (n_samples,) + Predicted target values. + + kind : {"actual_vs_predicted", "residual_vs_predicted"}, \ + default="residual_vs_predicted" + The type of plot to draw: + + - "actual_vs_predicted" draws the observed values (y-axis) vs. + the predicted values (x-axis). + - "residual_vs_predicted" draws the residuals, i.e. difference + between observed and predicted values, (y-axis) vs. the predicted + values (x-axis). + + subsample : float, int or None, default=1_000 + Sampling the samples to be shown on the scatter plot. If `float`, + it should be between 0 and 1 and represents the proportion of the + original dataset. If `int`, it represents the number of samples + display on the scatter plot. If `None`, no subsampling will be + applied. by default, 1000 samples or less will be displayed. + + random_state : int or RandomState, default=None + Controls the randomness when `subsample` is not `None`. + See :term:`Glossary ` for details. + + ax : matplotlib axes, default=None + Axes object to plot on. If `None`, a new figure and axes is + created. + + scatter_kwargs : dict, default=None + Dictionary with keywords passed to the `matplotlib.pyplot.scatter` + call. + + line_kwargs : dict, default=None + Dictionary with keyword passed to the `matplotlib.pyplot.plot` + call to draw the optimal line. + + Returns + ------- + display : :class:`~sklearn.metrics.PredictionErrorDisplay` + Object that stores the computed values. + + See Also + -------- + PredictionErrorDisplay : Prediction error visualization for regression. + PredictionErrorDisplay.from_estimator : Prediction error visualization + given an estimator and some data. 
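A sketch of the `subsample` behaviour documented above: an int caps the number of scattered points, a float in (0, 1) takes a proportion, and `random_state` fixes which points are drawn (illustrative only):

import matplotlib.pyplot as plt
from sklearn.datasets import load_diabetes
from sklearn.linear_model import Ridge
from sklearn.metrics import PredictionErrorDisplay

X, y = load_diabetes(return_X_y=True)
ridge = Ridge().fit(X, y)

# Show at most 100 points; pass scatter/line styling straight to matplotlib.
PredictionErrorDisplay.from_estimator(
    ridge, X, y,
    kind="actual_vs_predicted",
    subsample=100,
    random_state=0,
    scatter_kwargs={"alpha": 0.5},
    line_kwargs={"color": "tab:red"},
)
plt.show()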
+ + Examples + -------- + >>> import matplotlib.pyplot as plt + >>> from sklearn.datasets import load_diabetes + >>> from sklearn.linear_model import Ridge + >>> from sklearn.metrics import PredictionErrorDisplay + >>> X, y = load_diabetes(return_X_y=True) + >>> ridge = Ridge().fit(X, y) + >>> y_pred = ridge.predict(X) + >>> disp = PredictionErrorDisplay.from_predictions(y_true=y, y_pred=y_pred) + >>> plt.show() + """ + check_matplotlib_support(f"{cls.__name__}.from_predictions") + + random_state = check_random_state(random_state) + + n_samples = len(y_true) + if isinstance(subsample, numbers.Integral): + if subsample <= 0: + raise ValueError( + f"When an integer, subsample={subsample} should be positive." + ) + elif isinstance(subsample, numbers.Real): + if subsample <= 0 or subsample >= 1: + raise ValueError( + f"When a floating-point, subsample={subsample} should" + " be in the (0, 1) range." + ) + subsample = int(n_samples * subsample) + + if subsample is not None and subsample < n_samples: + indices = random_state.choice(np.arange(n_samples), size=subsample) + y_true = _safe_indexing(y_true, indices, axis=0) + y_pred = _safe_indexing(y_pred, indices, axis=0) + + viz = cls( + y_true=y_true, + y_pred=y_pred, + ) + + return viz.plot( + ax=ax, + kind=kind, + scatter_kwargs=scatter_kwargs, + line_kwargs=line_kwargs, + ) diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/_plot/roc_curve.py b/venv/lib/python3.10/site-packages/sklearn/metrics/_plot/roc_curve.py new file mode 100644 index 0000000000000000000000000000000000000000..292fb6e2e2f69563c58ea6bc15ae0869847b2f2e --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/metrics/_plot/roc_curve.py @@ -0,0 +1,419 @@ +from ...utils._plotting import _BinaryClassifierCurveDisplayMixin +from .._ranking import auc, roc_curve + + +class RocCurveDisplay(_BinaryClassifierCurveDisplayMixin): + """ROC Curve visualization. + + It is recommend to use + :func:`~sklearn.metrics.RocCurveDisplay.from_estimator` or + :func:`~sklearn.metrics.RocCurveDisplay.from_predictions` to create + a :class:`~sklearn.metrics.RocCurveDisplay`. All parameters are + stored as attributes. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + fpr : ndarray + False positive rate. + + tpr : ndarray + True positive rate. + + roc_auc : float, default=None + Area under ROC curve. If None, the roc_auc score is not shown. + + estimator_name : str, default=None + Name of estimator. If None, the estimator name is not shown. + + pos_label : int, float, bool or str, default=None + The class considered as the positive class when computing the roc auc + metrics. By default, `estimators.classes_[1]` is considered + as the positive class. + + .. versionadded:: 0.24 + + Attributes + ---------- + line_ : matplotlib Artist + ROC Curve. + + chance_level_ : matplotlib Artist or None + The chance level line. It is `None` if the chance level is not plotted. + + .. versionadded:: 1.3 + + ax_ : matplotlib Axes + Axes with ROC Curve. + + figure_ : matplotlib Figure + Figure containing the curve. + + See Also + -------- + roc_curve : Compute Receiver operating characteristic (ROC) curve. + RocCurveDisplay.from_estimator : Plot Receiver Operating Characteristic + (ROC) curve given an estimator and some data. + RocCurveDisplay.from_predictions : Plot Receiver Operating Characteristic + (ROC) curve given the true and predicted values. + roc_auc_score : Compute the area under the ROC curve. 
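As a complement to the constructor route shown in the Examples below, a hedged sketch of the estimator route with the chance-level diagonal enabled (illustrative only; `plot_chance_level` requires scikit-learn >= 1.3):

import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import RocCurveDisplay
from sklearn.model_selection import train_test_split

X, y = make_classification(random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LogisticRegression().fit(X_train, y_train)

# Draws the ROC curve plus the dashed "Chance level (AUC = 0.5)" diagonal.
RocCurveDisplay.from_estimator(clf, X_test, y_test, plot_chance_level=True)
plt.show()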
+ + Examples + -------- + >>> import matplotlib.pyplot as plt + >>> import numpy as np + >>> from sklearn import metrics + >>> y = np.array([0, 0, 1, 1]) + >>> pred = np.array([0.1, 0.4, 0.35, 0.8]) + >>> fpr, tpr, thresholds = metrics.roc_curve(y, pred) + >>> roc_auc = metrics.auc(fpr, tpr) + >>> display = metrics.RocCurveDisplay(fpr=fpr, tpr=tpr, roc_auc=roc_auc, + ... estimator_name='example estimator') + >>> display.plot() + <...> + >>> plt.show() + """ + + def __init__(self, *, fpr, tpr, roc_auc=None, estimator_name=None, pos_label=None): + self.estimator_name = estimator_name + self.fpr = fpr + self.tpr = tpr + self.roc_auc = roc_auc + self.pos_label = pos_label + + def plot( + self, + ax=None, + *, + name=None, + plot_chance_level=False, + chance_level_kw=None, + **kwargs, + ): + """Plot visualization. + + Extra keyword arguments will be passed to matplotlib's ``plot``. + + Parameters + ---------- + ax : matplotlib axes, default=None + Axes object to plot on. If `None`, a new figure and axes is + created. + + name : str, default=None + Name of ROC Curve for labeling. If `None`, use `estimator_name` if + not `None`, otherwise no labeling is shown. + + plot_chance_level : bool, default=False + Whether to plot the chance level. + + .. versionadded:: 1.3 + + chance_level_kw : dict, default=None + Keyword arguments to be passed to matplotlib's `plot` for rendering + the chance level line. + + .. versionadded:: 1.3 + + **kwargs : dict + Keyword arguments to be passed to matplotlib's `plot`. + + Returns + ------- + display : :class:`~sklearn.metrics.RocCurveDisplay` + Object that stores computed values. + """ + self.ax_, self.figure_, name = self._validate_plot_params(ax=ax, name=name) + + line_kwargs = {} + if self.roc_auc is not None and name is not None: + line_kwargs["label"] = f"{name} (AUC = {self.roc_auc:0.2f})" + elif self.roc_auc is not None: + line_kwargs["label"] = f"AUC = {self.roc_auc:0.2f}" + elif name is not None: + line_kwargs["label"] = name + + line_kwargs.update(**kwargs) + + chance_level_line_kw = { + "label": "Chance level (AUC = 0.5)", + "color": "k", + "linestyle": "--", + } + + if chance_level_kw is not None: + chance_level_line_kw.update(**chance_level_kw) + + (self.line_,) = self.ax_.plot(self.fpr, self.tpr, **line_kwargs) + info_pos_label = ( + f" (Positive label: {self.pos_label})" if self.pos_label is not None else "" + ) + + xlabel = "False Positive Rate" + info_pos_label + ylabel = "True Positive Rate" + info_pos_label + self.ax_.set( + xlabel=xlabel, + xlim=(-0.01, 1.01), + ylabel=ylabel, + ylim=(-0.01, 1.01), + aspect="equal", + ) + + if plot_chance_level: + (self.chance_level_,) = self.ax_.plot( + (0, 1), (0, 1), **chance_level_line_kw + ) + else: + self.chance_level_ = None + + if "label" in line_kwargs or "label" in chance_level_line_kw: + self.ax_.legend(loc="lower right") + + return self + + @classmethod + def from_estimator( + cls, + estimator, + X, + y, + *, + sample_weight=None, + drop_intermediate=True, + response_method="auto", + pos_label=None, + name=None, + ax=None, + plot_chance_level=False, + chance_level_kw=None, + **kwargs, + ): + """Create a ROC Curve display from an estimator. + + Parameters + ---------- + estimator : estimator instance + Fitted classifier or a fitted :class:`~sklearn.pipeline.Pipeline` + in which the last estimator is a classifier. + + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Input values. + + y : array-like of shape (n_samples,) + Target values. 
+ + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. + + drop_intermediate : bool, default=True + Whether to drop some suboptimal thresholds which would not appear + on a plotted ROC curve. This is useful in order to create lighter + ROC curves. + + response_method : {'predict_proba', 'decision_function', 'auto'} \ + default='auto' + Specifies whether to use :term:`predict_proba` or + :term:`decision_function` as the target response. If set to 'auto', + :term:`predict_proba` is tried first and if it does not exist + :term:`decision_function` is tried next. + + pos_label : int, float, bool or str, default=None + The class considered as the positive class when computing the roc auc + metrics. By default, `estimators.classes_[1]` is considered + as the positive class. + + name : str, default=None + Name of ROC Curve for labeling. If `None`, use the name of the + estimator. + + ax : matplotlib axes, default=None + Axes object to plot on. If `None`, a new figure and axes is created. + + plot_chance_level : bool, default=False + Whether to plot the chance level. + + .. versionadded:: 1.3 + + chance_level_kw : dict, default=None + Keyword arguments to be passed to matplotlib's `plot` for rendering + the chance level line. + + .. versionadded:: 1.3 + + **kwargs : dict + Keyword arguments to be passed to matplotlib's `plot`. + + Returns + ------- + display : :class:`~sklearn.metrics.RocCurveDisplay` + The ROC Curve display. + + See Also + -------- + roc_curve : Compute Receiver operating characteristic (ROC) curve. + RocCurveDisplay.from_predictions : ROC Curve visualization given the + probabilities of scores of a classifier. + roc_auc_score : Compute the area under the ROC curve. + + Examples + -------- + >>> import matplotlib.pyplot as plt + >>> from sklearn.datasets import make_classification + >>> from sklearn.metrics import RocCurveDisplay + >>> from sklearn.model_selection import train_test_split + >>> from sklearn.svm import SVC + >>> X, y = make_classification(random_state=0) + >>> X_train, X_test, y_train, y_test = train_test_split( + ... X, y, random_state=0) + >>> clf = SVC(random_state=0).fit(X_train, y_train) + >>> RocCurveDisplay.from_estimator( + ... clf, X_test, y_test) + <...> + >>> plt.show() + """ + y_pred, pos_label, name = cls._validate_and_get_response_values( + estimator, + X, + y, + response_method=response_method, + pos_label=pos_label, + name=name, + ) + + return cls.from_predictions( + y_true=y, + y_pred=y_pred, + sample_weight=sample_weight, + drop_intermediate=drop_intermediate, + name=name, + ax=ax, + pos_label=pos_label, + plot_chance_level=plot_chance_level, + chance_level_kw=chance_level_kw, + **kwargs, + ) + + @classmethod + def from_predictions( + cls, + y_true, + y_pred, + *, + sample_weight=None, + drop_intermediate=True, + pos_label=None, + name=None, + ax=None, + plot_chance_level=False, + chance_level_kw=None, + **kwargs, + ): + """Plot ROC curve given the true and predicted values. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 1.0 + + Parameters + ---------- + y_true : array-like of shape (n_samples,) + True labels. + + y_pred : array-like of shape (n_samples,) + Target scores, can either be probability estimates of the positive + class, confidence values, or non-thresholded measure of decisions + (as returned by “decision_function” on some classifiers). + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. 
+ + drop_intermediate : bool, default=True + Whether to drop some suboptimal thresholds which would not appear + on a plotted ROC curve. This is useful in order to create lighter + ROC curves. + + pos_label : int, float, bool or str, default=None + The label of the positive class. When `pos_label=None`, if `y_true` + is in {-1, 1} or {0, 1}, `pos_label` is set to 1, otherwise an + error will be raised. + + name : str, default=None + Name of ROC curve for labeling. If `None`, name will be set to + `"Classifier"`. + + ax : matplotlib axes, default=None + Axes object to plot on. If `None`, a new figure and axes is + created. + + plot_chance_level : bool, default=False + Whether to plot the chance level. + + .. versionadded:: 1.3 + + chance_level_kw : dict, default=None + Keyword arguments to be passed to matplotlib's `plot` for rendering + the chance level line. + + .. versionadded:: 1.3 + + **kwargs : dict + Additional keywords arguments passed to matplotlib `plot` function. + + Returns + ------- + display : :class:`~sklearn.metrics.RocCurveDisplay` + Object that stores computed values. + + See Also + -------- + roc_curve : Compute Receiver operating characteristic (ROC) curve. + RocCurveDisplay.from_estimator : ROC Curve visualization given an + estimator and some data. + roc_auc_score : Compute the area under the ROC curve. + + Examples + -------- + >>> import matplotlib.pyplot as plt + >>> from sklearn.datasets import make_classification + >>> from sklearn.metrics import RocCurveDisplay + >>> from sklearn.model_selection import train_test_split + >>> from sklearn.svm import SVC + >>> X, y = make_classification(random_state=0) + >>> X_train, X_test, y_train, y_test = train_test_split( + ... X, y, random_state=0) + >>> clf = SVC(random_state=0).fit(X_train, y_train) + >>> y_pred = clf.decision_function(X_test) + >>> RocCurveDisplay.from_predictions( + ... 
y_test, y_pred) + <...> + >>> plt.show() + """ + pos_label_validated, name = cls._validate_from_predictions_params( + y_true, y_pred, sample_weight=sample_weight, pos_label=pos_label, name=name + ) + + fpr, tpr, _ = roc_curve( + y_true, + y_pred, + pos_label=pos_label, + sample_weight=sample_weight, + drop_intermediate=drop_intermediate, + ) + roc_auc = auc(fpr, tpr) + + viz = cls( + fpr=fpr, + tpr=tpr, + roc_auc=roc_auc, + estimator_name=name, + pos_label=pos_label_validated, + ) + + return viz.plot( + ax=ax, + name=name, + plot_chance_level=plot_chance_level, + chance_level_kw=chance_level_kw, + **kwargs, + ) diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/_plot/tests/__init__.py b/venv/lib/python3.10/site-packages/sklearn/metrics/_plot/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/_plot/tests/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/metrics/_plot/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bb56249782541663e7cd1ba0904d5b92e1ca07c4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/metrics/_plot/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/_plot/tests/__pycache__/test_common_curve_display.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/metrics/_plot/tests/__pycache__/test_common_curve_display.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8f7736344f500e3852337392a2ce880450200900 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/metrics/_plot/tests/__pycache__/test_common_curve_display.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/_plot/tests/__pycache__/test_confusion_matrix_display.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/metrics/_plot/tests/__pycache__/test_confusion_matrix_display.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1fd31a2ca326a3ff3ea4e220d5091cf13adad135 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/metrics/_plot/tests/__pycache__/test_confusion_matrix_display.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/_plot/tests/__pycache__/test_det_curve_display.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/metrics/_plot/tests/__pycache__/test_det_curve_display.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7a21402c0c36219fa67b93f353607d009e4a1946 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/metrics/_plot/tests/__pycache__/test_det_curve_display.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/_plot/tests/__pycache__/test_precision_recall_display.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/metrics/_plot/tests/__pycache__/test_precision_recall_display.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..13317005fcaef50e0954b92a2f4bd89a668d8baf Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/metrics/_plot/tests/__pycache__/test_precision_recall_display.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/_plot/tests/__pycache__/test_predict_error_display.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/sklearn/metrics/_plot/tests/__pycache__/test_predict_error_display.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9a1b97babdb5b78cd5f42e8b1f446df151b886d0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/metrics/_plot/tests/__pycache__/test_predict_error_display.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/_plot/tests/__pycache__/test_roc_curve_display.cpython-310.pyc b/venv/lib/python3.10/site-packages/sklearn/metrics/_plot/tests/__pycache__/test_roc_curve_display.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d681e70243ace422472925a717324a8052ecf66b Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/metrics/_plot/tests/__pycache__/test_roc_curve_display.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/_plot/tests/test_common_curve_display.py b/venv/lib/python3.10/site-packages/sklearn/metrics/_plot/tests/test_common_curve_display.py new file mode 100644 index 0000000000000000000000000000000000000000..7fe0f0fc6fa7f52ef8afffb9d0cfc17135d515dc --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/metrics/_plot/tests/test_common_curve_display.py @@ -0,0 +1,269 @@ +import numpy as np +import pytest + +from sklearn.base import ClassifierMixin, clone +from sklearn.calibration import CalibrationDisplay +from sklearn.compose import make_column_transformer +from sklearn.datasets import load_iris +from sklearn.exceptions import NotFittedError +from sklearn.linear_model import LogisticRegression +from sklearn.metrics import ( + ConfusionMatrixDisplay, + DetCurveDisplay, + PrecisionRecallDisplay, + PredictionErrorDisplay, + RocCurveDisplay, +) +from sklearn.pipeline import make_pipeline +from sklearn.preprocessing import StandardScaler +from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor + + +@pytest.fixture(scope="module") +def data(): + return load_iris(return_X_y=True) + + +@pytest.fixture(scope="module") +def data_binary(data): + X, y = data + return X[y < 2], y[y < 2] + + +@pytest.mark.parametrize( + "Display", + [CalibrationDisplay, DetCurveDisplay, PrecisionRecallDisplay, RocCurveDisplay], +) +def test_display_curve_error_classifier(pyplot, data, data_binary, Display): + """Check that a proper error is raised when only binary classification is + supported.""" + X, y = data + X_binary, y_binary = data_binary + clf = DecisionTreeClassifier().fit(X, y) + + # Case 1: multiclass classifier with multiclass target + msg = "Expected 'estimator' to be a binary classifier. Got 3 classes instead." + with pytest.raises(ValueError, match=msg): + Display.from_estimator(clf, X, y) + + # Case 2: multiclass classifier with binary target + with pytest.raises(ValueError, match=msg): + Display.from_estimator(clf, X_binary, y_binary) + + # Case 3: binary classifier with multiclass target + clf = DecisionTreeClassifier().fit(X_binary, y_binary) + msg = "The target y is not binary. Got multiclass type of target." 
+ with pytest.raises(ValueError, match=msg): + Display.from_estimator(clf, X, y) + + +@pytest.mark.parametrize( + "Display", + [CalibrationDisplay, DetCurveDisplay, PrecisionRecallDisplay, RocCurveDisplay], +) +def test_display_curve_error_regression(pyplot, data_binary, Display): + """Check that we raise an error with regressor.""" + + # Case 1: regressor + X, y = data_binary + regressor = DecisionTreeRegressor().fit(X, y) + + msg = "Expected 'estimator' to be a binary classifier. Got DecisionTreeRegressor" + with pytest.raises(ValueError, match=msg): + Display.from_estimator(regressor, X, y) + + # Case 2: regression target + classifier = DecisionTreeClassifier().fit(X, y) + # Force `y_true` to be seen as a regression problem + y = y + 0.5 + msg = "The target y is not binary. Got continuous type of target." + with pytest.raises(ValueError, match=msg): + Display.from_estimator(classifier, X, y) + with pytest.raises(ValueError, match=msg): + Display.from_predictions(y, regressor.fit(X, y).predict(X)) + + +@pytest.mark.parametrize( + "response_method, msg", + [ + ( + "predict_proba", + "MyClassifier has none of the following attributes: predict_proba.", + ), + ( + "decision_function", + "MyClassifier has none of the following attributes: decision_function.", + ), + ( + "auto", + ( + "MyClassifier has none of the following attributes: predict_proba," + " decision_function." + ), + ), + ( + "bad_method", + "MyClassifier has none of the following attributes: bad_method.", + ), + ], +) +@pytest.mark.parametrize( + "Display", [DetCurveDisplay, PrecisionRecallDisplay, RocCurveDisplay] +) +def test_display_curve_error_no_response( + pyplot, + data_binary, + response_method, + msg, + Display, +): + """Check that a proper error is raised when the response method requested + is not defined for the given trained classifier.""" + X, y = data_binary + + class MyClassifier(ClassifierMixin): + def fit(self, X, y): + self.classes_ = [0, 1] + return self + + clf = MyClassifier().fit(X, y) + + with pytest.raises(AttributeError, match=msg): + Display.from_estimator(clf, X, y, response_method=response_method) + + +@pytest.mark.parametrize( + "Display", [DetCurveDisplay, PrecisionRecallDisplay, RocCurveDisplay] +) +@pytest.mark.parametrize("constructor_name", ["from_estimator", "from_predictions"]) +def test_display_curve_estimator_name_multiple_calls( + pyplot, + data_binary, + Display, + constructor_name, +): + """Check that passing `name` when calling `plot` will overwrite the original name + in the legend.""" + X, y = data_binary + clf_name = "my hand-crafted name" + clf = LogisticRegression().fit(X, y) + y_pred = clf.predict_proba(X)[:, 1] + + # safe guard for the binary if/else construction + assert constructor_name in ("from_estimator", "from_predictions") + + if constructor_name == "from_estimator": + disp = Display.from_estimator(clf, X, y, name=clf_name) + else: + disp = Display.from_predictions(y, y_pred, name=clf_name) + assert disp.estimator_name == clf_name + pyplot.close("all") + disp.plot() + assert clf_name in disp.line_.get_label() + pyplot.close("all") + clf_name = "another_name" + disp.plot(name=clf_name) + assert clf_name in disp.line_.get_label() + + +@pytest.mark.parametrize( + "clf", + [ + LogisticRegression(), + make_pipeline(StandardScaler(), LogisticRegression()), + make_pipeline( + make_column_transformer((StandardScaler(), [0, 1])), LogisticRegression() + ), + ], +) +@pytest.mark.parametrize( + "Display", [DetCurveDisplay, PrecisionRecallDisplay, RocCurveDisplay] +) +def 
test_display_curve_not_fitted_errors(pyplot, data_binary, clf, Display): + """Check that a proper error is raised when the classifier is not + fitted.""" + X, y = data_binary + # clone since we parametrize the test and the classifier will be fitted + # when testing the second and subsequent plotting function + model = clone(clf) + with pytest.raises(NotFittedError): + Display.from_estimator(model, X, y) + model.fit(X, y) + disp = Display.from_estimator(model, X, y) + assert model.__class__.__name__ in disp.line_.get_label() + assert disp.estimator_name == model.__class__.__name__ + + +@pytest.mark.parametrize( + "Display", [DetCurveDisplay, PrecisionRecallDisplay, RocCurveDisplay] +) +def test_display_curve_n_samples_consistency(pyplot, data_binary, Display): + """Check the error raised when `y_pred` or `sample_weight` have inconsistent + length.""" + X, y = data_binary + classifier = DecisionTreeClassifier().fit(X, y) + + msg = "Found input variables with inconsistent numbers of samples" + with pytest.raises(ValueError, match=msg): + Display.from_estimator(classifier, X[:-2], y) + with pytest.raises(ValueError, match=msg): + Display.from_estimator(classifier, X, y[:-2]) + with pytest.raises(ValueError, match=msg): + Display.from_estimator(classifier, X, y, sample_weight=np.ones(X.shape[0] - 2)) + + +@pytest.mark.parametrize( + "Display", [DetCurveDisplay, PrecisionRecallDisplay, RocCurveDisplay] +) +def test_display_curve_error_pos_label(pyplot, data_binary, Display): + """Check consistence of error message when `pos_label` should be specified.""" + X, y = data_binary + y = y + 10 + + classifier = DecisionTreeClassifier().fit(X, y) + y_pred = classifier.predict_proba(X)[:, -1] + msg = r"y_true takes value in {10, 11} and pos_label is not specified" + with pytest.raises(ValueError, match=msg): + Display.from_predictions(y, y_pred) + + +@pytest.mark.parametrize( + "Display", + [ + CalibrationDisplay, + DetCurveDisplay, + PrecisionRecallDisplay, + RocCurveDisplay, + PredictionErrorDisplay, + ConfusionMatrixDisplay, + ], +) +@pytest.mark.parametrize( + "constructor", + ["from_predictions", "from_estimator"], +) +def test_classifier_display_curve_named_constructor_return_type( + pyplot, data_binary, Display, constructor +): + """Check that named constructors return the correct type when subclassed. 
+ + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/pull/27675 + """ + X, y = data_binary + + # This can be anything - we just need to check the named constructor return + # type so the only requirement here is instantiating the class without error + y_pred = y + + classifier = LogisticRegression().fit(X, y) + + class SubclassOfDisplay(Display): + pass + + if constructor == "from_predictions": + curve = SubclassOfDisplay.from_predictions(y, y_pred) + else: # constructor == "from_estimator" + curve = SubclassOfDisplay.from_estimator(classifier, X, y) + + assert isinstance(curve, SubclassOfDisplay) diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/_plot/tests/test_confusion_matrix_display.py b/venv/lib/python3.10/site-packages/sklearn/metrics/_plot/tests/test_confusion_matrix_display.py new file mode 100644 index 0000000000000000000000000000000000000000..66c90d81dc01646727d806e9f8c51463bf0dac33 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/metrics/_plot/tests/test_confusion_matrix_display.py @@ -0,0 +1,380 @@ +import numpy as np +import pytest +from numpy.testing import ( + assert_allclose, + assert_array_equal, +) + +from sklearn.compose import make_column_transformer +from sklearn.datasets import make_classification +from sklearn.exceptions import NotFittedError +from sklearn.linear_model import LogisticRegression +from sklearn.metrics import ConfusionMatrixDisplay, confusion_matrix +from sklearn.pipeline import make_pipeline +from sklearn.preprocessing import StandardScaler +from sklearn.svm import SVC, SVR + +# TODO: Remove when https://github.com/numpy/numpy/issues/14397 is resolved +pytestmark = pytest.mark.filterwarnings( + "ignore:In future, it will be an error for 'np.bool_':DeprecationWarning:" + "matplotlib.*" +) + + +def test_confusion_matrix_display_validation(pyplot): + """Check that we raise the proper error when validating parameters.""" + X, y = make_classification( + n_samples=100, n_informative=5, n_classes=5, random_state=0 + ) + + with pytest.raises(NotFittedError): + ConfusionMatrixDisplay.from_estimator(SVC(), X, y) + + regressor = SVR().fit(X, y) + y_pred_regressor = regressor.predict(X) + y_pred_classifier = SVC().fit(X, y).predict(X) + + err_msg = "ConfusionMatrixDisplay.from_estimator only supports classifiers" + with pytest.raises(ValueError, match=err_msg): + ConfusionMatrixDisplay.from_estimator(regressor, X, y) + + err_msg = "Mix type of y not allowed, got types" + with pytest.raises(ValueError, match=err_msg): + # Force `y_true` to be seen as a regression problem + ConfusionMatrixDisplay.from_predictions(y + 0.5, y_pred_classifier) + with pytest.raises(ValueError, match=err_msg): + ConfusionMatrixDisplay.from_predictions(y, y_pred_regressor) + + err_msg = "Found input variables with inconsistent numbers of samples" + with pytest.raises(ValueError, match=err_msg): + ConfusionMatrixDisplay.from_predictions(y, y_pred_classifier[::2]) + + +@pytest.mark.parametrize("constructor_name", ["from_estimator", "from_predictions"]) +@pytest.mark.parametrize("with_labels", [True, False]) +@pytest.mark.parametrize("with_display_labels", [True, False]) +def test_confusion_matrix_display_custom_labels( + pyplot, constructor_name, with_labels, with_display_labels +): + """Check the resulting plot when labels are given.""" + n_classes = 5 + X, y = make_classification( + n_samples=100, n_informative=5, n_classes=n_classes, random_state=0 + ) + classifier = SVC().fit(X, y) + y_pred = classifier.predict(X) + + # safe guard for 
the binary if/else construction + assert constructor_name in ("from_estimator", "from_predictions") + + ax = pyplot.gca() + labels = [2, 1, 0, 3, 4] if with_labels else None + display_labels = ["b", "d", "a", "e", "f"] if with_display_labels else None + + cm = confusion_matrix(y, y_pred, labels=labels) + common_kwargs = { + "ax": ax, + "display_labels": display_labels, + "labels": labels, + } + if constructor_name == "from_estimator": + disp = ConfusionMatrixDisplay.from_estimator(classifier, X, y, **common_kwargs) + else: + disp = ConfusionMatrixDisplay.from_predictions(y, y_pred, **common_kwargs) + assert_allclose(disp.confusion_matrix, cm) + + if with_display_labels: + expected_display_labels = display_labels + elif with_labels: + expected_display_labels = labels + else: + expected_display_labels = list(range(n_classes)) + + expected_display_labels_str = [str(name) for name in expected_display_labels] + + x_ticks = [tick.get_text() for tick in disp.ax_.get_xticklabels()] + y_ticks = [tick.get_text() for tick in disp.ax_.get_yticklabels()] + + assert_array_equal(disp.display_labels, expected_display_labels) + assert_array_equal(x_ticks, expected_display_labels_str) + assert_array_equal(y_ticks, expected_display_labels_str) + + +@pytest.mark.parametrize("constructor_name", ["from_estimator", "from_predictions"]) +@pytest.mark.parametrize("normalize", ["true", "pred", "all", None]) +@pytest.mark.parametrize("include_values", [True, False]) +def test_confusion_matrix_display_plotting( + pyplot, + constructor_name, + normalize, + include_values, +): + """Check the overall plotting rendering.""" + n_classes = 5 + X, y = make_classification( + n_samples=100, n_informative=5, n_classes=n_classes, random_state=0 + ) + classifier = SVC().fit(X, y) + y_pred = classifier.predict(X) + + # safe guard for the binary if/else construction + assert constructor_name in ("from_estimator", "from_predictions") + + ax = pyplot.gca() + cmap = "plasma" + + cm = confusion_matrix(y, y_pred) + common_kwargs = { + "normalize": normalize, + "cmap": cmap, + "ax": ax, + "include_values": include_values, + } + if constructor_name == "from_estimator": + disp = ConfusionMatrixDisplay.from_estimator(classifier, X, y, **common_kwargs) + else: + disp = ConfusionMatrixDisplay.from_predictions(y, y_pred, **common_kwargs) + + assert disp.ax_ == ax + + if normalize == "true": + cm = cm / cm.sum(axis=1, keepdims=True) + elif normalize == "pred": + cm = cm / cm.sum(axis=0, keepdims=True) + elif normalize == "all": + cm = cm / cm.sum() + + assert_allclose(disp.confusion_matrix, cm) + import matplotlib as mpl + + assert isinstance(disp.im_, mpl.image.AxesImage) + assert disp.im_.get_cmap().name == cmap + assert isinstance(disp.ax_, pyplot.Axes) + assert isinstance(disp.figure_, pyplot.Figure) + + assert disp.ax_.get_ylabel() == "True label" + assert disp.ax_.get_xlabel() == "Predicted label" + + x_ticks = [tick.get_text() for tick in disp.ax_.get_xticklabels()] + y_ticks = [tick.get_text() for tick in disp.ax_.get_yticklabels()] + + expected_display_labels = list(range(n_classes)) + + expected_display_labels_str = [str(name) for name in expected_display_labels] + + assert_array_equal(disp.display_labels, expected_display_labels) + assert_array_equal(x_ticks, expected_display_labels_str) + assert_array_equal(y_ticks, expected_display_labels_str) + + image_data = disp.im_.get_array().data + assert_allclose(image_data, cm) + + if include_values: + assert disp.text_.shape == (n_classes, n_classes) + fmt = ".2g" + expected_text = 
np.array([format(v, fmt) for v in cm.ravel(order="C")]) + text_text = np.array([t.get_text() for t in disp.text_.ravel(order="C")]) + assert_array_equal(expected_text, text_text) + else: + assert disp.text_ is None + + +@pytest.mark.parametrize("constructor_name", ["from_estimator", "from_predictions"]) +def test_confusion_matrix_display(pyplot, constructor_name): + """Check the behaviour of the default constructor without using the class + methods.""" + n_classes = 5 + X, y = make_classification( + n_samples=100, n_informative=5, n_classes=n_classes, random_state=0 + ) + classifier = SVC().fit(X, y) + y_pred = classifier.predict(X) + + # safe guard for the binary if/else construction + assert constructor_name in ("from_estimator", "from_predictions") + + cm = confusion_matrix(y, y_pred) + common_kwargs = { + "normalize": None, + "include_values": True, + "cmap": "viridis", + "xticks_rotation": 45.0, + } + if constructor_name == "from_estimator": + disp = ConfusionMatrixDisplay.from_estimator(classifier, X, y, **common_kwargs) + else: + disp = ConfusionMatrixDisplay.from_predictions(y, y_pred, **common_kwargs) + + assert_allclose(disp.confusion_matrix, cm) + assert disp.text_.shape == (n_classes, n_classes) + + rotations = [tick.get_rotation() for tick in disp.ax_.get_xticklabels()] + assert_allclose(rotations, 45.0) + + image_data = disp.im_.get_array().data + assert_allclose(image_data, cm) + + disp.plot(cmap="plasma") + assert disp.im_.get_cmap().name == "plasma" + + disp.plot(include_values=False) + assert disp.text_ is None + + disp.plot(xticks_rotation=90.0) + rotations = [tick.get_rotation() for tick in disp.ax_.get_xticklabels()] + assert_allclose(rotations, 90.0) + + disp.plot(values_format="e") + expected_text = np.array([format(v, "e") for v in cm.ravel(order="C")]) + text_text = np.array([t.get_text() for t in disp.text_.ravel(order="C")]) + assert_array_equal(expected_text, text_text) + + +def test_confusion_matrix_contrast(pyplot): + """Check that the text color is appropriate depending on background.""" + + cm = np.eye(2) / 2 + disp = ConfusionMatrixDisplay(cm, display_labels=[0, 1]) + + disp.plot(cmap=pyplot.cm.gray) + # diagonal text is black + assert_allclose(disp.text_[0, 0].get_color(), [0.0, 0.0, 0.0, 1.0]) + assert_allclose(disp.text_[1, 1].get_color(), [0.0, 0.0, 0.0, 1.0]) + + # off-diagonal text is white + assert_allclose(disp.text_[0, 1].get_color(), [1.0, 1.0, 1.0, 1.0]) + assert_allclose(disp.text_[1, 0].get_color(), [1.0, 1.0, 1.0, 1.0]) + + disp.plot(cmap=pyplot.cm.gray_r) + # diagonal text is white + assert_allclose(disp.text_[0, 1].get_color(), [0.0, 0.0, 0.0, 1.0]) + assert_allclose(disp.text_[1, 0].get_color(), [0.0, 0.0, 0.0, 1.0]) + + # off-diagonal text is black + assert_allclose(disp.text_[0, 0].get_color(), [1.0, 1.0, 1.0, 1.0]) + assert_allclose(disp.text_[1, 1].get_color(), [1.0, 1.0, 1.0, 1.0]) + + # Regression test for #15920 + cm = np.array([[19, 34], [32, 58]]) + disp = ConfusionMatrixDisplay(cm, display_labels=[0, 1]) + + disp.plot(cmap=pyplot.cm.Blues) + min_color = pyplot.cm.Blues(0) + max_color = pyplot.cm.Blues(255) + assert_allclose(disp.text_[0, 0].get_color(), max_color) + assert_allclose(disp.text_[0, 1].get_color(), max_color) + assert_allclose(disp.text_[1, 0].get_color(), max_color) + assert_allclose(disp.text_[1, 1].get_color(), min_color) + + +@pytest.mark.parametrize( + "clf", + [ + LogisticRegression(), + make_pipeline(StandardScaler(), LogisticRegression()), + make_pipeline( + make_column_transformer((StandardScaler(), [0, 1])), 
+ LogisticRegression(), + ), + ], + ids=["clf", "pipeline-clf", "pipeline-column_transformer-clf"], +) +def test_confusion_matrix_pipeline(pyplot, clf): + """Check the behaviour of the plotting with more complex pipeline.""" + n_classes = 5 + X, y = make_classification( + n_samples=100, n_informative=5, n_classes=n_classes, random_state=0 + ) + with pytest.raises(NotFittedError): + ConfusionMatrixDisplay.from_estimator(clf, X, y) + clf.fit(X, y) + y_pred = clf.predict(X) + + disp = ConfusionMatrixDisplay.from_estimator(clf, X, y) + cm = confusion_matrix(y, y_pred) + + assert_allclose(disp.confusion_matrix, cm) + assert disp.text_.shape == (n_classes, n_classes) + + +@pytest.mark.parametrize("constructor_name", ["from_estimator", "from_predictions"]) +def test_confusion_matrix_with_unknown_labels(pyplot, constructor_name): + """Check that when labels=None, the unique values in `y_pred` and `y_true` + will be used. + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/pull/18405 + """ + n_classes = 5 + X, y = make_classification( + n_samples=100, n_informative=5, n_classes=n_classes, random_state=0 + ) + classifier = SVC().fit(X, y) + y_pred = classifier.predict(X) + # create unseen labels in `y_true` not seen during fitting and not present + # in 'classifier.classes_' + y = y + 1 + + # safe guard for the binary if/else construction + assert constructor_name in ("from_estimator", "from_predictions") + + common_kwargs = {"labels": None} + if constructor_name == "from_estimator": + disp = ConfusionMatrixDisplay.from_estimator(classifier, X, y, **common_kwargs) + else: + disp = ConfusionMatrixDisplay.from_predictions(y, y_pred, **common_kwargs) + + display_labels = [tick.get_text() for tick in disp.ax_.get_xticklabels()] + expected_labels = [str(i) for i in range(n_classes + 1)] + assert_array_equal(expected_labels, display_labels) + + +def test_colormap_max(pyplot): + """Check that the max color is used for the color of the text.""" + gray = pyplot.get_cmap("gray", 1024) + confusion_matrix = np.array([[1.0, 0.0], [0.0, 1.0]]) + + disp = ConfusionMatrixDisplay(confusion_matrix) + disp.plot(cmap=gray) + + color = disp.text_[1, 0].get_color() + assert_allclose(color, [1.0, 1.0, 1.0, 1.0]) + + +def test_im_kw_adjust_vmin_vmax(pyplot): + """Check that im_kw passes kwargs to imshow""" + + confusion_matrix = np.array([[0.48, 0.04], [0.08, 0.4]]) + disp = ConfusionMatrixDisplay(confusion_matrix) + disp.plot(im_kw=dict(vmin=0.0, vmax=0.8)) + + clim = disp.im_.get_clim() + assert clim[0] == pytest.approx(0.0) + assert clim[1] == pytest.approx(0.8) + + +def test_confusion_matrix_text_kw(pyplot): + """Check that text_kw is passed to the text call.""" + font_size = 15.0 + X, y = make_classification(random_state=0) + classifier = SVC().fit(X, y) + + # from_estimator passes the font size + disp = ConfusionMatrixDisplay.from_estimator( + classifier, X, y, text_kw={"fontsize": font_size} + ) + for text in disp.text_.reshape(-1): + assert text.get_fontsize() == font_size + + # plot adjusts plot to new font size + new_font_size = 20.0 + disp.plot(text_kw={"fontsize": new_font_size}) + for text in disp.text_.reshape(-1): + assert text.get_fontsize() == new_font_size + + # from_predictions passes the font size + y_pred = classifier.predict(X) + disp = ConfusionMatrixDisplay.from_predictions( + y, y_pred, text_kw={"fontsize": font_size} + ) + for text in disp.text_.reshape(-1): + assert text.get_fontsize() == font_size diff --git 
a/venv/lib/python3.10/site-packages/sklearn/metrics/_plot/tests/test_det_curve_display.py b/venv/lib/python3.10/site-packages/sklearn/metrics/_plot/tests/test_det_curve_display.py new file mode 100644 index 0000000000000000000000000000000000000000..403ea701095772efd81bcc7666b6f419fba039b9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/metrics/_plot/tests/test_det_curve_display.py @@ -0,0 +1,106 @@ +import numpy as np +import pytest +from numpy.testing import assert_allclose + +from sklearn.datasets import load_iris +from sklearn.linear_model import LogisticRegression +from sklearn.metrics import DetCurveDisplay, det_curve + + +@pytest.mark.parametrize("constructor_name", ["from_estimator", "from_predictions"]) +@pytest.mark.parametrize("response_method", ["predict_proba", "decision_function"]) +@pytest.mark.parametrize("with_sample_weight", [True, False]) +@pytest.mark.parametrize("with_strings", [True, False]) +def test_det_curve_display( + pyplot, constructor_name, response_method, with_sample_weight, with_strings +): + X, y = load_iris(return_X_y=True) + # Binarize the data with only the two first classes + X, y = X[y < 2], y[y < 2] + + pos_label = None + if with_strings: + y = np.array(["c", "b"])[y] + pos_label = "c" + + if with_sample_weight: + rng = np.random.RandomState(42) + sample_weight = rng.randint(1, 4, size=(X.shape[0])) + else: + sample_weight = None + + lr = LogisticRegression() + lr.fit(X, y) + y_pred = getattr(lr, response_method)(X) + if y_pred.ndim == 2: + y_pred = y_pred[:, 1] + + # safe guard for the binary if/else construction + assert constructor_name in ("from_estimator", "from_predictions") + + common_kwargs = { + "name": lr.__class__.__name__, + "alpha": 0.8, + "sample_weight": sample_weight, + "pos_label": pos_label, + } + if constructor_name == "from_estimator": + disp = DetCurveDisplay.from_estimator(lr, X, y, **common_kwargs) + else: + disp = DetCurveDisplay.from_predictions(y, y_pred, **common_kwargs) + + fpr, fnr, _ = det_curve( + y, + y_pred, + sample_weight=sample_weight, + pos_label=pos_label, + ) + + assert_allclose(disp.fpr, fpr) + assert_allclose(disp.fnr, fnr) + + assert disp.estimator_name == "LogisticRegression" + + # cannot fail thanks to pyplot fixture + import matplotlib as mpl # noqal + + assert isinstance(disp.line_, mpl.lines.Line2D) + assert disp.line_.get_alpha() == 0.8 + assert isinstance(disp.ax_, mpl.axes.Axes) + assert isinstance(disp.figure_, mpl.figure.Figure) + assert disp.line_.get_label() == "LogisticRegression" + + expected_pos_label = 1 if pos_label is None else pos_label + expected_ylabel = f"False Negative Rate (Positive label: {expected_pos_label})" + expected_xlabel = f"False Positive Rate (Positive label: {expected_pos_label})" + assert disp.ax_.get_ylabel() == expected_ylabel + assert disp.ax_.get_xlabel() == expected_xlabel + + +@pytest.mark.parametrize( + "constructor_name, expected_clf_name", + [ + ("from_estimator", "LogisticRegression"), + ("from_predictions", "Classifier"), + ], +) +def test_det_curve_display_default_name( + pyplot, + constructor_name, + expected_clf_name, +): + # Check the default name display in the figure when `name` is not provided + X, y = load_iris(return_X_y=True) + # Binarize the data with only the two first classes + X, y = X[y < 2], y[y < 2] + + lr = LogisticRegression().fit(X, y) + y_pred = lr.predict_proba(X)[:, 1] + + if constructor_name == "from_estimator": + disp = DetCurveDisplay.from_estimator(lr, X, y) + else: + disp = DetCurveDisplay.from_predictions(y, y_pred) + + 
assert disp.estimator_name == expected_clf_name + assert disp.line_.get_label() == expected_clf_name diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/_plot/tests/test_precision_recall_display.py b/venv/lib/python3.10/site-packages/sklearn/metrics/_plot/tests/test_precision_recall_display.py new file mode 100644 index 0000000000000000000000000000000000000000..0173e5338d722a21efb92fa084b35c3aee968c43 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/metrics/_plot/tests/test_precision_recall_display.py @@ -0,0 +1,361 @@ +from collections import Counter + +import numpy as np +import pytest + +from sklearn.compose import make_column_transformer +from sklearn.datasets import load_breast_cancer, make_classification +from sklearn.exceptions import NotFittedError +from sklearn.linear_model import LogisticRegression +from sklearn.metrics import ( + PrecisionRecallDisplay, + average_precision_score, + precision_recall_curve, +) +from sklearn.model_selection import train_test_split +from sklearn.pipeline import make_pipeline +from sklearn.preprocessing import StandardScaler +from sklearn.utils import shuffle +from sklearn.utils.fixes import trapezoid + +# TODO: Remove when https://github.com/numpy/numpy/issues/14397 is resolved +pytestmark = pytest.mark.filterwarnings( + "ignore:In future, it will be an error for 'np.bool_':DeprecationWarning:" + "matplotlib.*" +) + + +@pytest.mark.parametrize("constructor_name", ["from_estimator", "from_predictions"]) +@pytest.mark.parametrize("response_method", ["predict_proba", "decision_function"]) +@pytest.mark.parametrize("drop_intermediate", [True, False]) +def test_precision_recall_display_plotting( + pyplot, constructor_name, response_method, drop_intermediate +): + """Check the overall plotting rendering.""" + X, y = make_classification(n_classes=2, n_samples=50, random_state=0) + pos_label = 1 + + classifier = LogisticRegression().fit(X, y) + classifier.fit(X, y) + + y_pred = getattr(classifier, response_method)(X) + y_pred = y_pred if y_pred.ndim == 1 else y_pred[:, pos_label] + + # safe guard for the binary if/else construction + assert constructor_name in ("from_estimator", "from_predictions") + + if constructor_name == "from_estimator": + display = PrecisionRecallDisplay.from_estimator( + classifier, + X, + y, + response_method=response_method, + drop_intermediate=drop_intermediate, + ) + else: + display = PrecisionRecallDisplay.from_predictions( + y, y_pred, pos_label=pos_label, drop_intermediate=drop_intermediate + ) + + precision, recall, _ = precision_recall_curve( + y, y_pred, pos_label=pos_label, drop_intermediate=drop_intermediate + ) + average_precision = average_precision_score(y, y_pred, pos_label=pos_label) + + np.testing.assert_allclose(display.precision, precision) + np.testing.assert_allclose(display.recall, recall) + assert display.average_precision == pytest.approx(average_precision) + + import matplotlib as mpl + + assert isinstance(display.line_, mpl.lines.Line2D) + assert isinstance(display.ax_, mpl.axes.Axes) + assert isinstance(display.figure_, mpl.figure.Figure) + + assert display.ax_.get_xlabel() == "Recall (Positive label: 1)" + assert display.ax_.get_ylabel() == "Precision (Positive label: 1)" + assert display.ax_.get_adjustable() == "box" + assert display.ax_.get_aspect() in ("equal", 1.0) + assert display.ax_.get_xlim() == display.ax_.get_ylim() == (-0.01, 1.01) + + # plotting passing some new parameters + display.plot(alpha=0.8, name="MySpecialEstimator") + expected_label = f"MySpecialEstimator (AP = 
{average_precision:0.2f})" + assert display.line_.get_label() == expected_label + assert display.line_.get_alpha() == pytest.approx(0.8) + + # Check that the chance level line is not plotted by default + assert display.chance_level_ is None + + +@pytest.mark.parametrize("chance_level_kw", [None, {"color": "r"}]) +@pytest.mark.parametrize("constructor_name", ["from_estimator", "from_predictions"]) +def test_precision_recall_chance_level_line( + pyplot, + chance_level_kw, + constructor_name, +): + """Check the chance level line plotting behavior.""" + X, y = make_classification(n_classes=2, n_samples=50, random_state=0) + pos_prevalence = Counter(y)[1] / len(y) + + lr = LogisticRegression() + y_pred = lr.fit(X, y).predict_proba(X)[:, 1] + + if constructor_name == "from_estimator": + display = PrecisionRecallDisplay.from_estimator( + lr, + X, + y, + plot_chance_level=True, + chance_level_kw=chance_level_kw, + ) + else: + display = PrecisionRecallDisplay.from_predictions( + y, + y_pred, + plot_chance_level=True, + chance_level_kw=chance_level_kw, + ) + + import matplotlib as mpl # noqa + + assert isinstance(display.chance_level_, mpl.lines.Line2D) + assert tuple(display.chance_level_.get_xdata()) == (0, 1) + assert tuple(display.chance_level_.get_ydata()) == (pos_prevalence, pos_prevalence) + + # Checking for chance level line styles + if chance_level_kw is None: + assert display.chance_level_.get_color() == "k" + else: + assert display.chance_level_.get_color() == "r" + + +@pytest.mark.parametrize( + "constructor_name, default_label", + [ + ("from_estimator", "LogisticRegression (AP = {:.2f})"), + ("from_predictions", "Classifier (AP = {:.2f})"), + ], +) +def test_precision_recall_display_name(pyplot, constructor_name, default_label): + """Check the behaviour of the name parameters""" + X, y = make_classification(n_classes=2, n_samples=100, random_state=0) + pos_label = 1 + + classifier = LogisticRegression().fit(X, y) + classifier.fit(X, y) + + y_pred = classifier.predict_proba(X)[:, pos_label] + + # safe guard for the binary if/else construction + assert constructor_name in ("from_estimator", "from_predictions") + + if constructor_name == "from_estimator": + display = PrecisionRecallDisplay.from_estimator(classifier, X, y) + else: + display = PrecisionRecallDisplay.from_predictions( + y, y_pred, pos_label=pos_label + ) + + average_precision = average_precision_score(y, y_pred, pos_label=pos_label) + + # check that the default name is used + assert display.line_.get_label() == default_label.format(average_precision) + + # check that the name can be set + display.plot(name="MySpecialEstimator") + assert ( + display.line_.get_label() + == f"MySpecialEstimator (AP = {average_precision:.2f})" + ) + + +@pytest.mark.parametrize( + "clf", + [ + make_pipeline(StandardScaler(), LogisticRegression()), + make_pipeline( + make_column_transformer((StandardScaler(), [0, 1])), LogisticRegression() + ), + ], +) +def test_precision_recall_display_pipeline(pyplot, clf): + X, y = make_classification(n_classes=2, n_samples=50, random_state=0) + with pytest.raises(NotFittedError): + PrecisionRecallDisplay.from_estimator(clf, X, y) + clf.fit(X, y) + display = PrecisionRecallDisplay.from_estimator(clf, X, y) + assert display.estimator_name == clf.__class__.__name__ + + +def test_precision_recall_display_string_labels(pyplot): + # regression test #15738 + cancer = load_breast_cancer() + X, y = cancer.data, cancer.target_names[cancer.target] + + lr = make_pipeline(StandardScaler(), LogisticRegression()) + lr.fit(X, 
y) + for klass in cancer.target_names: + assert klass in lr.classes_ + display = PrecisionRecallDisplay.from_estimator(lr, X, y) + + y_pred = lr.predict_proba(X)[:, 1] + avg_prec = average_precision_score(y, y_pred, pos_label=lr.classes_[1]) + + assert display.average_precision == pytest.approx(avg_prec) + assert display.estimator_name == lr.__class__.__name__ + + err_msg = r"y_true takes value in {'benign', 'malignant'}" + with pytest.raises(ValueError, match=err_msg): + PrecisionRecallDisplay.from_predictions(y, y_pred) + + display = PrecisionRecallDisplay.from_predictions( + y, y_pred, pos_label=lr.classes_[1] + ) + assert display.average_precision == pytest.approx(avg_prec) + + +@pytest.mark.parametrize( + "average_precision, estimator_name, expected_label", + [ + (0.9, None, "AP = 0.90"), + (None, "my_est", "my_est"), + (0.8, "my_est2", "my_est2 (AP = 0.80)"), + ], +) +def test_default_labels(pyplot, average_precision, estimator_name, expected_label): + """Check the default labels used in the display.""" + precision = np.array([1, 0.5, 0]) + recall = np.array([0, 0.5, 1]) + display = PrecisionRecallDisplay( + precision, + recall, + average_precision=average_precision, + estimator_name=estimator_name, + ) + display.plot() + assert display.line_.get_label() == expected_label + + +@pytest.mark.parametrize("constructor_name", ["from_estimator", "from_predictions"]) +@pytest.mark.parametrize("response_method", ["predict_proba", "decision_function"]) +def test_plot_precision_recall_pos_label(pyplot, constructor_name, response_method): + # check that we can provide the positive label and display the proper + # statistics + X, y = load_breast_cancer(return_X_y=True) + # create an highly imbalanced version of the breast cancer dataset + idx_positive = np.flatnonzero(y == 1) + idx_negative = np.flatnonzero(y == 0) + idx_selected = np.hstack([idx_negative, idx_positive[:25]]) + X, y = X[idx_selected], y[idx_selected] + X, y = shuffle(X, y, random_state=42) + # only use 2 features to make the problem even harder + X = X[:, :2] + y = np.array(["cancer" if c == 1 else "not cancer" for c in y], dtype=object) + X_train, X_test, y_train, y_test = train_test_split( + X, + y, + stratify=y, + random_state=0, + ) + + classifier = LogisticRegression() + classifier.fit(X_train, y_train) + + # sanity check to be sure the positive class is classes_[0] and that we + # are betrayed by the class imbalance + assert classifier.classes_.tolist() == ["cancer", "not cancer"] + + y_pred = getattr(classifier, response_method)(X_test) + # we select the corresponding probability columns or reverse the decision + # function otherwise + y_pred_cancer = -1 * y_pred if y_pred.ndim == 1 else y_pred[:, 0] + y_pred_not_cancer = y_pred if y_pred.ndim == 1 else y_pred[:, 1] + + if constructor_name == "from_estimator": + display = PrecisionRecallDisplay.from_estimator( + classifier, + X_test, + y_test, + pos_label="cancer", + response_method=response_method, + ) + else: + display = PrecisionRecallDisplay.from_predictions( + y_test, + y_pred_cancer, + pos_label="cancer", + ) + # we should obtain the statistics of the "cancer" class + avg_prec_limit = 0.65 + assert display.average_precision < avg_prec_limit + assert -trapezoid(display.precision, display.recall) < avg_prec_limit + + # otherwise we should obtain the statistics of the "not cancer" class + if constructor_name == "from_estimator": + display = PrecisionRecallDisplay.from_estimator( + classifier, + X_test, + y_test, + response_method=response_method, + pos_label="not 
cancer", + ) + else: + display = PrecisionRecallDisplay.from_predictions( + y_test, + y_pred_not_cancer, + pos_label="not cancer", + ) + avg_prec_limit = 0.95 + assert display.average_precision > avg_prec_limit + assert -trapezoid(display.precision, display.recall) > avg_prec_limit + + +@pytest.mark.parametrize("constructor_name", ["from_estimator", "from_predictions"]) +def test_precision_recall_prevalence_pos_label_reusable(pyplot, constructor_name): + # Check that even if one passes plot_chance_level=False the first time + # one can still call disp.plot with plot_chance_level=True and get the + # chance level line + X, y = make_classification(n_classes=2, n_samples=50, random_state=0) + + lr = LogisticRegression() + y_pred = lr.fit(X, y).predict_proba(X)[:, 1] + + if constructor_name == "from_estimator": + display = PrecisionRecallDisplay.from_estimator( + lr, X, y, plot_chance_level=False + ) + else: + display = PrecisionRecallDisplay.from_predictions( + y, y_pred, plot_chance_level=False + ) + assert display.chance_level_ is None + + import matplotlib as mpl # noqa + + # When calling from_estimator or from_predictions, + # prevalence_pos_label should have been set, so that directly + # calling plot_chance_level=True should plot the chance level line + display.plot(plot_chance_level=True) + assert isinstance(display.chance_level_, mpl.lines.Line2D) + + +def test_precision_recall_raise_no_prevalence(pyplot): + # Check that raises correctly when plotting chance level with + # no prvelance_pos_label is provided + precision = np.array([1, 0.5, 0]) + recall = np.array([0, 0.5, 1]) + display = PrecisionRecallDisplay(precision, recall) + + msg = ( + "You must provide prevalence_pos_label when constructing the " + "PrecisionRecallDisplay object in order to plot the chance " + "level line. 
Alternatively, you may use " + "PrecisionRecallDisplay.from_estimator or " + "PrecisionRecallDisplay.from_predictions " + "to automatically set prevalence_pos_label" + ) + + with pytest.raises(ValueError, match=msg): + display.plot(plot_chance_level=True) diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/_plot/tests/test_predict_error_display.py b/venv/lib/python3.10/site-packages/sklearn/metrics/_plot/tests/test_predict_error_display.py new file mode 100644 index 0000000000000000000000000000000000000000..535c9af9506ce06c1440b32449e3b5756964a321 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/metrics/_plot/tests/test_predict_error_display.py @@ -0,0 +1,161 @@ +import pytest +from numpy.testing import assert_allclose + +from sklearn.datasets import load_diabetes +from sklearn.exceptions import NotFittedError +from sklearn.linear_model import Ridge +from sklearn.metrics import PredictionErrorDisplay + +X, y = load_diabetes(return_X_y=True) + + +@pytest.fixture +def regressor_fitted(): + return Ridge().fit(X, y) + + +@pytest.mark.parametrize( + "regressor, params, err_type, err_msg", + [ + ( + Ridge().fit(X, y), + {"subsample": -1}, + ValueError, + "When an integer, subsample=-1 should be", + ), + ( + Ridge().fit(X, y), + {"subsample": 20.0}, + ValueError, + "When a floating-point, subsample=20.0 should be", + ), + ( + Ridge().fit(X, y), + {"subsample": -20.0}, + ValueError, + "When a floating-point, subsample=-20.0 should be", + ), + ( + Ridge().fit(X, y), + {"kind": "xxx"}, + ValueError, + "`kind` must be one of", + ), + ], +) +@pytest.mark.parametrize("class_method", ["from_estimator", "from_predictions"]) +def test_prediction_error_display_raise_error( + pyplot, class_method, regressor, params, err_type, err_msg +): + """Check that we raise the proper error when making the parameters + # validation.""" + with pytest.raises(err_type, match=err_msg): + if class_method == "from_estimator": + PredictionErrorDisplay.from_estimator(regressor, X, y, **params) + else: + y_pred = regressor.predict(X) + PredictionErrorDisplay.from_predictions(y_true=y, y_pred=y_pred, **params) + + +def test_from_estimator_not_fitted(pyplot): + """Check that we raise a `NotFittedError` when the passed regressor is not + fit.""" + regressor = Ridge() + with pytest.raises(NotFittedError, match="is not fitted yet."): + PredictionErrorDisplay.from_estimator(regressor, X, y) + + +@pytest.mark.parametrize("class_method", ["from_estimator", "from_predictions"]) +@pytest.mark.parametrize("kind", ["actual_vs_predicted", "residual_vs_predicted"]) +def test_prediction_error_display(pyplot, regressor_fitted, class_method, kind): + """Check the default behaviour of the display.""" + if class_method == "from_estimator": + display = PredictionErrorDisplay.from_estimator( + regressor_fitted, X, y, kind=kind + ) + else: + y_pred = regressor_fitted.predict(X) + display = PredictionErrorDisplay.from_predictions( + y_true=y, y_pred=y_pred, kind=kind + ) + + if kind == "actual_vs_predicted": + assert_allclose(display.line_.get_xdata(), display.line_.get_ydata()) + assert display.ax_.get_xlabel() == "Predicted values" + assert display.ax_.get_ylabel() == "Actual values" + assert display.line_ is not None + else: + assert display.ax_.get_xlabel() == "Predicted values" + assert display.ax_.get_ylabel() == "Residuals (actual - predicted)" + assert display.line_ is not None + + assert display.ax_.get_legend() is None + + +@pytest.mark.parametrize("class_method", ["from_estimator", "from_predictions"]) 
+@pytest.mark.parametrize( + "subsample, expected_size", + [(5, 5), (0.1, int(X.shape[0] * 0.1)), (None, X.shape[0])], +) +def test_plot_prediction_error_subsample( + pyplot, regressor_fitted, class_method, subsample, expected_size +): + """Check the behaviour of `subsample`.""" + if class_method == "from_estimator": + display = PredictionErrorDisplay.from_estimator( + regressor_fitted, X, y, subsample=subsample + ) + else: + y_pred = regressor_fitted.predict(X) + display = PredictionErrorDisplay.from_predictions( + y_true=y, y_pred=y_pred, subsample=subsample + ) + assert len(display.scatter_.get_offsets()) == expected_size + + +@pytest.mark.parametrize("class_method", ["from_estimator", "from_predictions"]) +def test_plot_prediction_error_ax(pyplot, regressor_fitted, class_method): + """Check that we can pass an axis to the display.""" + _, ax = pyplot.subplots() + if class_method == "from_estimator": + display = PredictionErrorDisplay.from_estimator(regressor_fitted, X, y, ax=ax) + else: + y_pred = regressor_fitted.predict(X) + display = PredictionErrorDisplay.from_predictions( + y_true=y, y_pred=y_pred, ax=ax + ) + assert display.ax_ is ax + + +@pytest.mark.parametrize("class_method", ["from_estimator", "from_predictions"]) +def test_prediction_error_custom_artist(pyplot, regressor_fitted, class_method): + """Check that we can tune the style of the lines.""" + extra_params = { + "kind": "actual_vs_predicted", + "scatter_kwargs": {"color": "red"}, + "line_kwargs": {"color": "black"}, + } + if class_method == "from_estimator": + display = PredictionErrorDisplay.from_estimator( + regressor_fitted, X, y, **extra_params + ) + else: + y_pred = regressor_fitted.predict(X) + display = PredictionErrorDisplay.from_predictions( + y_true=y, y_pred=y_pred, **extra_params + ) + + assert display.line_.get_color() == "black" + assert_allclose(display.scatter_.get_edgecolor(), [[1.0, 0.0, 0.0, 0.8]]) + + # create a display with the default values + if class_method == "from_estimator": + display = PredictionErrorDisplay.from_estimator(regressor_fitted, X, y) + else: + y_pred = regressor_fitted.predict(X) + display = PredictionErrorDisplay.from_predictions(y_true=y, y_pred=y_pred) + pyplot.close("all") + + display.plot(**extra_params) + assert display.line_.get_color() == "black" + assert_allclose(display.scatter_.get_edgecolor(), [[1.0, 0.0, 0.0, 0.8]]) diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/_plot/tests/test_roc_curve_display.py b/venv/lib/python3.10/site-packages/sklearn/metrics/_plot/tests/test_roc_curve_display.py new file mode 100644 index 0000000000000000000000000000000000000000..8fd9f965765186f25a8a575652191427a2aa4240 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/metrics/_plot/tests/test_roc_curve_display.py @@ -0,0 +1,315 @@ +import numpy as np +import pytest +from numpy.testing import assert_allclose + +from sklearn.compose import make_column_transformer +from sklearn.datasets import load_breast_cancer, load_iris +from sklearn.exceptions import NotFittedError +from sklearn.linear_model import LogisticRegression +from sklearn.metrics import RocCurveDisplay, auc, roc_curve +from sklearn.model_selection import train_test_split +from sklearn.pipeline import make_pipeline +from sklearn.preprocessing import StandardScaler +from sklearn.utils import shuffle +from sklearn.utils.fixes import trapezoid + + +@pytest.fixture(scope="module") +def data(): + return load_iris(return_X_y=True) + + +@pytest.fixture(scope="module") +def data_binary(data): + X, y = data + 
return X[y < 2], y[y < 2] + + +@pytest.mark.parametrize("response_method", ["predict_proba", "decision_function"]) +@pytest.mark.parametrize("with_sample_weight", [True, False]) +@pytest.mark.parametrize("drop_intermediate", [True, False]) +@pytest.mark.parametrize("with_strings", [True, False]) +@pytest.mark.parametrize( + "constructor_name, default_name", + [ + ("from_estimator", "LogisticRegression"), + ("from_predictions", "Classifier"), + ], +) +def test_roc_curve_display_plotting( + pyplot, + response_method, + data_binary, + with_sample_weight, + drop_intermediate, + with_strings, + constructor_name, + default_name, +): + """Check the overall plotting behaviour.""" + X, y = data_binary + + pos_label = None + if with_strings: + y = np.array(["c", "b"])[y] + pos_label = "c" + + if with_sample_weight: + rng = np.random.RandomState(42) + sample_weight = rng.randint(1, 4, size=(X.shape[0])) + else: + sample_weight = None + + lr = LogisticRegression() + lr.fit(X, y) + + y_pred = getattr(lr, response_method)(X) + y_pred = y_pred if y_pred.ndim == 1 else y_pred[:, 1] + + if constructor_name == "from_estimator": + display = RocCurveDisplay.from_estimator( + lr, + X, + y, + sample_weight=sample_weight, + drop_intermediate=drop_intermediate, + pos_label=pos_label, + alpha=0.8, + ) + else: + display = RocCurveDisplay.from_predictions( + y, + y_pred, + sample_weight=sample_weight, + drop_intermediate=drop_intermediate, + pos_label=pos_label, + alpha=0.8, + ) + + fpr, tpr, _ = roc_curve( + y, + y_pred, + sample_weight=sample_weight, + drop_intermediate=drop_intermediate, + pos_label=pos_label, + ) + + assert_allclose(display.roc_auc, auc(fpr, tpr)) + assert_allclose(display.fpr, fpr) + assert_allclose(display.tpr, tpr) + + assert display.estimator_name == default_name + + import matplotlib as mpl # noqal + + assert isinstance(display.line_, mpl.lines.Line2D) + assert display.line_.get_alpha() == 0.8 + assert isinstance(display.ax_, mpl.axes.Axes) + assert isinstance(display.figure_, mpl.figure.Figure) + assert display.ax_.get_adjustable() == "box" + assert display.ax_.get_aspect() in ("equal", 1.0) + assert display.ax_.get_xlim() == display.ax_.get_ylim() == (-0.01, 1.01) + + expected_label = f"{default_name} (AUC = {display.roc_auc:.2f})" + assert display.line_.get_label() == expected_label + + expected_pos_label = 1 if pos_label is None else pos_label + expected_ylabel = f"True Positive Rate (Positive label: {expected_pos_label})" + expected_xlabel = f"False Positive Rate (Positive label: {expected_pos_label})" + + assert display.ax_.get_ylabel() == expected_ylabel + assert display.ax_.get_xlabel() == expected_xlabel + + +@pytest.mark.parametrize("plot_chance_level", [True, False]) +@pytest.mark.parametrize( + "chance_level_kw", + [None, {"linewidth": 1, "color": "red", "label": "DummyEstimator"}], +) +@pytest.mark.parametrize( + "constructor_name", + ["from_estimator", "from_predictions"], +) +def test_roc_curve_chance_level_line( + pyplot, + data_binary, + plot_chance_level, + chance_level_kw, + constructor_name, +): + """Check the chance level line plotting behaviour.""" + X, y = data_binary + + lr = LogisticRegression() + lr.fit(X, y) + + y_pred = getattr(lr, "predict_proba")(X) + y_pred = y_pred if y_pred.ndim == 1 else y_pred[:, 1] + + if constructor_name == "from_estimator": + display = RocCurveDisplay.from_estimator( + lr, + X, + y, + alpha=0.8, + plot_chance_level=plot_chance_level, + chance_level_kw=chance_level_kw, + ) + else: + display = RocCurveDisplay.from_predictions( + y, + 
y_pred, + alpha=0.8, + plot_chance_level=plot_chance_level, + chance_level_kw=chance_level_kw, + ) + + import matplotlib as mpl # noqa + + assert isinstance(display.line_, mpl.lines.Line2D) + assert display.line_.get_alpha() == 0.8 + assert isinstance(display.ax_, mpl.axes.Axes) + assert isinstance(display.figure_, mpl.figure.Figure) + + if plot_chance_level: + assert isinstance(display.chance_level_, mpl.lines.Line2D) + assert tuple(display.chance_level_.get_xdata()) == (0, 1) + assert tuple(display.chance_level_.get_ydata()) == (0, 1) + else: + assert display.chance_level_ is None + + # Checking for chance level line styles + if plot_chance_level and chance_level_kw is None: + assert display.chance_level_.get_color() == "k" + assert display.chance_level_.get_linestyle() == "--" + assert display.chance_level_.get_label() == "Chance level (AUC = 0.5)" + elif plot_chance_level: + assert display.chance_level_.get_label() == chance_level_kw["label"] + assert display.chance_level_.get_color() == chance_level_kw["color"] + assert display.chance_level_.get_linewidth() == chance_level_kw["linewidth"] + + +@pytest.mark.parametrize( + "clf", + [ + LogisticRegression(), + make_pipeline(StandardScaler(), LogisticRegression()), + make_pipeline( + make_column_transformer((StandardScaler(), [0, 1])), LogisticRegression() + ), + ], +) +@pytest.mark.parametrize("constructor_name", ["from_estimator", "from_predictions"]) +def test_roc_curve_display_complex_pipeline(pyplot, data_binary, clf, constructor_name): + """Check the behaviour with complex pipeline.""" + X, y = data_binary + + if constructor_name == "from_estimator": + with pytest.raises(NotFittedError): + RocCurveDisplay.from_estimator(clf, X, y) + + clf.fit(X, y) + + if constructor_name == "from_estimator": + display = RocCurveDisplay.from_estimator(clf, X, y) + name = clf.__class__.__name__ + else: + display = RocCurveDisplay.from_predictions(y, y) + name = "Classifier" + + assert name in display.line_.get_label() + assert display.estimator_name == name + + +@pytest.mark.parametrize( + "roc_auc, estimator_name, expected_label", + [ + (0.9, None, "AUC = 0.90"), + (None, "my_est", "my_est"), + (0.8, "my_est2", "my_est2 (AUC = 0.80)"), + ], +) +def test_roc_curve_display_default_labels( + pyplot, roc_auc, estimator_name, expected_label +): + """Check the default labels used in the display.""" + fpr = np.array([0, 0.5, 1]) + tpr = np.array([0, 0.5, 1]) + disp = RocCurveDisplay( + fpr=fpr, tpr=tpr, roc_auc=roc_auc, estimator_name=estimator_name + ).plot() + assert disp.line_.get_label() == expected_label + + +@pytest.mark.parametrize("response_method", ["predict_proba", "decision_function"]) +@pytest.mark.parametrize("constructor_name", ["from_estimator", "from_predictions"]) +def test_plot_roc_curve_pos_label(pyplot, response_method, constructor_name): + # check that we can provide the positive label and display the proper + # statistics + X, y = load_breast_cancer(return_X_y=True) + # create an highly imbalanced + idx_positive = np.flatnonzero(y == 1) + idx_negative = np.flatnonzero(y == 0) + idx_selected = np.hstack([idx_negative, idx_positive[:25]]) + X, y = X[idx_selected], y[idx_selected] + X, y = shuffle(X, y, random_state=42) + # only use 2 features to make the problem even harder + X = X[:, :2] + y = np.array(["cancer" if c == 1 else "not cancer" for c in y], dtype=object) + X_train, X_test, y_train, y_test = train_test_split( + X, + y, + stratify=y, + random_state=0, + ) + + classifier = LogisticRegression() + classifier.fit(X_train, 
y_train) + + # sanity check to be sure the positive class is classes_[0] and that we + # are betrayed by the class imbalance + assert classifier.classes_.tolist() == ["cancer", "not cancer"] + + y_pred = getattr(classifier, response_method)(X_test) + # we select the corresponding probability columns or reverse the decision + # function otherwise + y_pred_cancer = -1 * y_pred if y_pred.ndim == 1 else y_pred[:, 0] + y_pred_not_cancer = y_pred if y_pred.ndim == 1 else y_pred[:, 1] + + if constructor_name == "from_estimator": + display = RocCurveDisplay.from_estimator( + classifier, + X_test, + y_test, + pos_label="cancer", + response_method=response_method, + ) + else: + display = RocCurveDisplay.from_predictions( + y_test, + y_pred_cancer, + pos_label="cancer", + ) + + roc_auc_limit = 0.95679 + + assert display.roc_auc == pytest.approx(roc_auc_limit) + assert trapezoid(display.tpr, display.fpr) == pytest.approx(roc_auc_limit) + + if constructor_name == "from_estimator": + display = RocCurveDisplay.from_estimator( + classifier, + X_test, + y_test, + response_method=response_method, + pos_label="not cancer", + ) + else: + display = RocCurveDisplay.from_predictions( + y_test, + y_pred_not_cancer, + pos_label="not cancer", + ) + + assert display.roc_auc == pytest.approx(roc_auc_limit) + assert trapezoid(display.tpr, display.fpr) == pytest.approx(roc_auc_limit) diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py b/venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py new file mode 100644 index 0000000000000000000000000000000000000000..01241045f9e55993aeaf775a470518ce99c48a52 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/metrics/_ranking.py @@ -0,0 +1,2014 @@ +"""Metrics to assess performance on classification task given scores. + +Functions named as ``*_score`` return a scalar value to maximize: the higher +the better. + +Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize: +the lower the better. +""" + +# Authors: Alexandre Gramfort +# Mathieu Blondel +# Olivier Grisel +# Arnaud Joly +# Jochen Wersdorfer +# Lars Buitinck +# Joel Nothman +# Noel Dawe +# Michal Karbownik +# License: BSD 3 clause + + +import warnings +from functools import partial +from numbers import Integral, Real + +import numpy as np +from scipy.sparse import csr_matrix, issparse +from scipy.stats import rankdata + +from ..exceptions import UndefinedMetricWarning +from ..preprocessing import label_binarize +from ..utils import ( + assert_all_finite, + check_array, + check_consistent_length, + column_or_1d, +) +from ..utils._encode import _encode, _unique +from ..utils._param_validation import Interval, StrOptions, validate_params +from ..utils.extmath import stable_cumsum +from ..utils.fixes import trapezoid +from ..utils.multiclass import type_of_target +from ..utils.sparsefuncs import count_nonzero +from ..utils.validation import _check_pos_label_consistency, _check_sample_weight +from ._base import _average_binary_score, _average_multiclass_ovo_score + + +@validate_params( + {"x": ["array-like"], "y": ["array-like"]}, + prefer_skip_nested_validation=True, +) +def auc(x, y): + """Compute Area Under the Curve (AUC) using the trapezoidal rule. + + This is a general function, given points on a curve. For computing the + area under the ROC-curve, see :func:`roc_auc_score`. For an alternative + way to summarize a precision-recall curve, see + :func:`average_precision_score`. + + Parameters + ---------- + x : array-like of shape (n,) + X coordinates. 
These must be either monotonic increasing or monotonic + decreasing. + y : array-like of shape (n,) + Y coordinates. + + Returns + ------- + auc : float + Area Under the Curve. + + See Also + -------- + roc_auc_score : Compute the area under the ROC curve. + average_precision_score : Compute average precision from prediction scores. + precision_recall_curve : Compute precision-recall pairs for different + probability thresholds. + + Examples + -------- + >>> import numpy as np + >>> from sklearn import metrics + >>> y = np.array([1, 1, 2, 2]) + >>> pred = np.array([0.1, 0.4, 0.35, 0.8]) + >>> fpr, tpr, thresholds = metrics.roc_curve(y, pred, pos_label=2) + >>> metrics.auc(fpr, tpr) + 0.75 + """ + check_consistent_length(x, y) + x = column_or_1d(x) + y = column_or_1d(y) + + if x.shape[0] < 2: + raise ValueError( + "At least 2 points are needed to compute area under curve, but x.shape = %s" + % x.shape + ) + + direction = 1 + dx = np.diff(x) + if np.any(dx < 0): + if np.all(dx <= 0): + direction = -1 + else: + raise ValueError("x is neither increasing nor decreasing : {}.".format(x)) + + area = direction * trapezoid(y, x) + if isinstance(area, np.memmap): + # Reductions such as .sum used internally in trapezoid do not return a + # scalar by default for numpy.memmap instances contrary to + # regular numpy.ndarray instances. + area = area.dtype.type(area) + return area + + +@validate_params( + { + "y_true": ["array-like"], + "y_score": ["array-like"], + "average": [StrOptions({"micro", "samples", "weighted", "macro"}), None], + "pos_label": [Real, str, "boolean"], + "sample_weight": ["array-like", None], + }, + prefer_skip_nested_validation=True, +) +def average_precision_score( + y_true, y_score, *, average="macro", pos_label=1, sample_weight=None +): + """Compute average precision (AP) from prediction scores. + + AP summarizes a precision-recall curve as the weighted mean of precisions + achieved at each threshold, with the increase in recall from the previous + threshold used as the weight: + + .. math:: + \\text{AP} = \\sum_n (R_n - R_{n-1}) P_n + + where :math:`P_n` and :math:`R_n` are the precision and recall at the nth + threshold [1]_. This implementation is not interpolated and is different + from computing the area under the precision-recall curve with the + trapezoidal rule, which uses linear interpolation and can be too + optimistic. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + y_true : array-like of shape (n_samples,) or (n_samples, n_classes) + True binary labels or binary label indicators. + + y_score : array-like of shape (n_samples,) or (n_samples, n_classes) + Target scores, can either be probability estimates of the positive + class, confidence values, or non-thresholded measure of decisions + (as returned by :term:`decision_function` on some classifiers). + + average : {'micro', 'samples', 'weighted', 'macro'} or None, \ + default='macro' + If ``None``, the scores for each class are returned. Otherwise, + this determines the type of averaging performed on the data: + + ``'micro'``: + Calculate metrics globally by considering each element of the label + indicator matrix as a label. + ``'macro'``: + Calculate metrics for each label, and find their unweighted + mean. This does not take label imbalance into account. + ``'weighted'``: + Calculate metrics for each label, and find their average, weighted + by support (the number of true instances for each label). + ``'samples'``: + Calculate metrics for each instance, and find their average. 
+ + Will be ignored when ``y_true`` is binary. + + pos_label : int, float, bool or str, default=1 + The label of the positive class. Only applied to binary ``y_true``. + For multilabel-indicator ``y_true``, ``pos_label`` is fixed to 1. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. + + Returns + ------- + average_precision : float + Average precision score. + + See Also + -------- + roc_auc_score : Compute the area under the ROC curve. + precision_recall_curve : Compute precision-recall pairs for different + probability thresholds. + + Notes + ----- + .. versionchanged:: 0.19 + Instead of linearly interpolating between operating points, precisions + are weighted by the change in recall since the last operating point. + + References + ---------- + .. [1] `Wikipedia entry for the Average precision + `_ + + Examples + -------- + >>> import numpy as np + >>> from sklearn.metrics import average_precision_score + >>> y_true = np.array([0, 0, 1, 1]) + >>> y_scores = np.array([0.1, 0.4, 0.35, 0.8]) + >>> average_precision_score(y_true, y_scores) + 0.83... + >>> y_true = np.array([0, 0, 1, 1, 2, 2]) + >>> y_scores = np.array([ + ... [0.7, 0.2, 0.1], + ... [0.4, 0.3, 0.3], + ... [0.1, 0.8, 0.1], + ... [0.2, 0.3, 0.5], + ... [0.4, 0.4, 0.2], + ... [0.1, 0.2, 0.7], + ... ]) + >>> average_precision_score(y_true, y_scores) + 0.77... + """ + + def _binary_uninterpolated_average_precision( + y_true, y_score, pos_label=1, sample_weight=None + ): + precision, recall, _ = precision_recall_curve( + y_true, y_score, pos_label=pos_label, sample_weight=sample_weight + ) + # Return the step function integral + # The following works because the last entry of precision is + # guaranteed to be 1, as returned by precision_recall_curve + return -np.sum(np.diff(recall) * np.array(precision)[:-1]) + + y_type = type_of_target(y_true, input_name="y_true") + + # Convert to Python primitive type to avoid NumPy type / Python str + # comparison. See https://github.com/numpy/numpy/issues/6784 + present_labels = np.unique(y_true).tolist() + + if y_type == "binary": + if len(present_labels) == 2 and pos_label not in present_labels: + raise ValueError( + f"pos_label={pos_label} is not a valid label. It should be " + f"one of {present_labels}" + ) + + elif y_type == "multilabel-indicator" and pos_label != 1: + raise ValueError( + "Parameter pos_label is fixed to 1 for multilabel-indicator y_true. " + "Do not set pos_label or set pos_label to 1." + ) + + elif y_type == "multiclass": + if pos_label != 1: + raise ValueError( + "Parameter pos_label is fixed to 1 for multiclass y_true. " + "Do not set pos_label or set pos_label to 1." + ) + y_true = label_binarize(y_true, classes=present_labels) + + average_precision = partial( + _binary_uninterpolated_average_precision, pos_label=pos_label + ) + return _average_binary_score( + average_precision, y_true, y_score, average, sample_weight=sample_weight + ) + + +@validate_params( + { + "y_true": ["array-like"], + "y_score": ["array-like"], + "pos_label": [Real, str, "boolean", None], + "sample_weight": ["array-like", None], + }, + prefer_skip_nested_validation=True, +) +def det_curve(y_true, y_score, pos_label=None, sample_weight=None): + """Compute error rates for different probability thresholds. + + .. note:: + This metric is used for evaluation of ranking and error tradeoffs of + a binary classification task. + + Read more in the :ref:`User Guide `. + + .. 
versionadded:: 0.24 + + Parameters + ---------- + y_true : ndarray of shape (n_samples,) + True binary labels. If labels are not either {-1, 1} or {0, 1}, then + pos_label should be explicitly given. + + y_score : ndarray of shape of (n_samples,) + Target scores, can either be probability estimates of the positive + class, confidence values, or non-thresholded measure of decisions + (as returned by "decision_function" on some classifiers). + + pos_label : int, float, bool or str, default=None + The label of the positive class. + When ``pos_label=None``, if `y_true` is in {-1, 1} or {0, 1}, + ``pos_label`` is set to 1, otherwise an error will be raised. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. + + Returns + ------- + fpr : ndarray of shape (n_thresholds,) + False positive rate (FPR) such that element i is the false positive + rate of predictions with score >= thresholds[i]. This is occasionally + referred to as false acceptance probability or fall-out. + + fnr : ndarray of shape (n_thresholds,) + False negative rate (FNR) such that element i is the false negative + rate of predictions with score >= thresholds[i]. This is occasionally + referred to as false rejection or miss rate. + + thresholds : ndarray of shape (n_thresholds,) + Decreasing score values. + + See Also + -------- + DetCurveDisplay.from_estimator : Plot DET curve given an estimator and + some data. + DetCurveDisplay.from_predictions : Plot DET curve given the true and + predicted labels. + DetCurveDisplay : DET curve visualization. + roc_curve : Compute Receiver operating characteristic (ROC) curve. + precision_recall_curve : Compute precision-recall curve. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.metrics import det_curve + >>> y_true = np.array([0, 0, 1, 1]) + >>> y_scores = np.array([0.1, 0.4, 0.35, 0.8]) + >>> fpr, fnr, thresholds = det_curve(y_true, y_scores) + >>> fpr + array([0.5, 0.5, 0. ]) + >>> fnr + array([0. , 0.5, 0.5]) + >>> thresholds + array([0.35, 0.4 , 0.8 ]) + """ + fps, tps, thresholds = _binary_clf_curve( + y_true, y_score, pos_label=pos_label, sample_weight=sample_weight + ) + + if len(np.unique(y_true)) != 2: + raise ValueError( + "Only one class present in y_true. Detection error " + "tradeoff curve is not defined in that case." + ) + + fns = tps[-1] - tps + p_count = tps[-1] + n_count = fps[-1] + + # start with false positives zero + first_ind = ( + fps.searchsorted(fps[0], side="right") - 1 + if fps.searchsorted(fps[0], side="right") > 0 + else None + ) + # stop with false negatives zero + last_ind = tps.searchsorted(tps[-1]) + 1 + sl = slice(first_ind, last_ind) + + # reverse the output such that list of false positives is decreasing + return (fps[sl][::-1] / n_count, fns[sl][::-1] / p_count, thresholds[sl][::-1]) + + +def _binary_roc_auc_score(y_true, y_score, sample_weight=None, max_fpr=None): + """Binary roc auc score.""" + if len(np.unique(y_true)) != 2: + raise ValueError( + "Only one class present in y_true. ROC AUC score " + "is not defined in that case." 
+ ) + + fpr, tpr, _ = roc_curve(y_true, y_score, sample_weight=sample_weight) + if max_fpr is None or max_fpr == 1: + return auc(fpr, tpr) + if max_fpr <= 0 or max_fpr > 1: + raise ValueError("Expected max_fpr in range (0, 1], got: %r" % max_fpr) + + # Add a single point at max_fpr by linear interpolation + stop = np.searchsorted(fpr, max_fpr, "right") + x_interp = [fpr[stop - 1], fpr[stop]] + y_interp = [tpr[stop - 1], tpr[stop]] + tpr = np.append(tpr[:stop], np.interp(max_fpr, x_interp, y_interp)) + fpr = np.append(fpr[:stop], max_fpr) + partial_auc = auc(fpr, tpr) + + # McClish correction: standardize result to be 0.5 if non-discriminant + # and 1 if maximal + min_area = 0.5 * max_fpr**2 + max_area = max_fpr + return 0.5 * (1 + (partial_auc - min_area) / (max_area - min_area)) + + +@validate_params( + { + "y_true": ["array-like"], + "y_score": ["array-like"], + "average": [StrOptions({"micro", "macro", "samples", "weighted"}), None], + "sample_weight": ["array-like", None], + "max_fpr": [Interval(Real, 0.0, 1, closed="right"), None], + "multi_class": [StrOptions({"raise", "ovr", "ovo"})], + "labels": ["array-like", None], + }, + prefer_skip_nested_validation=True, +) +def roc_auc_score( + y_true, + y_score, + *, + average="macro", + sample_weight=None, + max_fpr=None, + multi_class="raise", + labels=None, +): + """Compute Area Under the Receiver Operating Characteristic Curve (ROC AUC) \ + from prediction scores. + + Note: this implementation can be used with binary, multiclass and + multilabel classification, but some restrictions apply (see Parameters). + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + y_true : array-like of shape (n_samples,) or (n_samples, n_classes) + True labels or binary label indicators. The binary and multiclass cases + expect labels with shape (n_samples,) while the multilabel case expects + binary label indicators with shape (n_samples, n_classes). + + y_score : array-like of shape (n_samples,) or (n_samples, n_classes) + Target scores. + + * In the binary case, it corresponds to an array of shape + `(n_samples,)`. Both probability estimates and non-thresholded + decision values can be provided. The probability estimates correspond + to the **probability of the class with the greater label**, + i.e. `estimator.classes_[1]` and thus + `estimator.predict_proba(X, y)[:, 1]`. The decision values + corresponds to the output of `estimator.decision_function(X, y)`. + See more information in the :ref:`User guide `; + * In the multiclass case, it corresponds to an array of shape + `(n_samples, n_classes)` of probability estimates provided by the + `predict_proba` method. The probability estimates **must** + sum to 1 across the possible classes. In addition, the order of the + class scores must correspond to the order of ``labels``, + if provided, or else to the numerical or lexicographical order of + the labels in ``y_true``. See more information in the + :ref:`User guide `; + * In the multilabel case, it corresponds to an array of shape + `(n_samples, n_classes)`. Probability estimates are provided by the + `predict_proba` method and the non-thresholded decision values by + the `decision_function` method. The probability estimates correspond + to the **probability of the class with the greater label for each + output** of the classifier. See more information in the + :ref:`User guide `. + + average : {'micro', 'macro', 'samples', 'weighted'} or None, \ + default='macro' + If ``None``, the scores for each class are returned. 
+ Otherwise, this determines the type of averaging performed on the data. + Note: multiclass ROC AUC currently only handles the 'macro' and + 'weighted' averages. For multiclass targets, `average=None` is only + implemented for `multi_class='ovr'` and `average='micro'` is only + implemented for `multi_class='ovr'`. + + ``'micro'``: + Calculate metrics globally by considering each element of the label + indicator matrix as a label. + ``'macro'``: + Calculate metrics for each label, and find their unweighted + mean. This does not take label imbalance into account. + ``'weighted'``: + Calculate metrics for each label, and find their average, weighted + by support (the number of true instances for each label). + ``'samples'``: + Calculate metrics for each instance, and find their average. + + Will be ignored when ``y_true`` is binary. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. + + max_fpr : float > 0 and <= 1, default=None + If not ``None``, the standardized partial AUC [2]_ over the range + [0, max_fpr] is returned. For the multiclass case, ``max_fpr``, + should be either equal to ``None`` or ``1.0`` as AUC ROC partial + computation currently is not supported for multiclass. + + multi_class : {'raise', 'ovr', 'ovo'}, default='raise' + Only used for multiclass targets. Determines the type of configuration + to use. The default value raises an error, so either + ``'ovr'`` or ``'ovo'`` must be passed explicitly. + + ``'ovr'``: + Stands for One-vs-rest. Computes the AUC of each class + against the rest [3]_ [4]_. This + treats the multiclass case in the same way as the multilabel case. + Sensitive to class imbalance even when ``average == 'macro'``, + because class imbalance affects the composition of each of the + 'rest' groupings. + ``'ovo'``: + Stands for One-vs-one. Computes the average AUC of all + possible pairwise combinations of classes [5]_. + Insensitive to class imbalance when + ``average == 'macro'``. + + labels : array-like of shape (n_classes,), default=None + Only used for multiclass targets. List of labels that index the + classes in ``y_score``. If ``None``, the numerical or lexicographical + order of the labels in ``y_true`` is used. + + Returns + ------- + auc : float + Area Under the Curve score. + + See Also + -------- + average_precision_score : Area under the precision-recall curve. + roc_curve : Compute Receiver operating characteristic (ROC) curve. + RocCurveDisplay.from_estimator : Plot Receiver Operating Characteristic + (ROC) curve given an estimator and some data. + RocCurveDisplay.from_predictions : Plot Receiver Operating Characteristic + (ROC) curve given the true and predicted values. + + Notes + ----- + The Gini Coefficient is a summary measure of the ranking ability of binary + classifiers. It is expressed using the area under of the ROC as follows: + + G = 2 * AUC - 1 + + Where G is the Gini coefficient and AUC is the ROC-AUC score. This normalisation + will ensure that random guessing will yield a score of 0 in expectation, and it is + upper bounded by 1. + + References + ---------- + .. [1] `Wikipedia entry for the Receiver operating characteristic + `_ + + .. [2] `Analyzing a portion of the ROC curve. McClish, 1989 + `_ + + .. [3] Provost, F., Domingos, P. (2000). Well-trained PETs: Improving + probability estimation trees (Section 6.2), CeDER Working Paper + #IS-00-04, Stern School of Business, New York University. + + .. [4] `Fawcett, T. (2006). An introduction to ROC analysis. 
Pattern + Recognition Letters, 27(8), 861-874. + `_ + + .. [5] `Hand, D.J., Till, R.J. (2001). A Simple Generalisation of the Area + Under the ROC Curve for Multiple Class Classification Problems. + Machine Learning, 45(2), 171-186. + `_ + .. [6] `Wikipedia entry for the Gini coefficient + `_ + + Examples + -------- + Binary case: + + >>> from sklearn.datasets import load_breast_cancer + >>> from sklearn.linear_model import LogisticRegression + >>> from sklearn.metrics import roc_auc_score + >>> X, y = load_breast_cancer(return_X_y=True) + >>> clf = LogisticRegression(solver="liblinear", random_state=0).fit(X, y) + >>> roc_auc_score(y, clf.predict_proba(X)[:, 1]) + 0.99... + >>> roc_auc_score(y, clf.decision_function(X)) + 0.99... + + Multiclass case: + + >>> from sklearn.datasets import load_iris + >>> X, y = load_iris(return_X_y=True) + >>> clf = LogisticRegression(solver="liblinear").fit(X, y) + >>> roc_auc_score(y, clf.predict_proba(X), multi_class='ovr') + 0.99... + + Multilabel case: + + >>> import numpy as np + >>> from sklearn.datasets import make_multilabel_classification + >>> from sklearn.multioutput import MultiOutputClassifier + >>> X, y = make_multilabel_classification(random_state=0) + >>> clf = MultiOutputClassifier(clf).fit(X, y) + >>> # get a list of n_output containing probability arrays of shape + >>> # (n_samples, n_classes) + >>> y_pred = clf.predict_proba(X) + >>> # extract the positive columns for each output + >>> y_pred = np.transpose([pred[:, 1] for pred in y_pred]) + >>> roc_auc_score(y, y_pred, average=None) + array([0.82..., 0.86..., 0.94..., 0.85... , 0.94...]) + >>> from sklearn.linear_model import RidgeClassifierCV + >>> clf = RidgeClassifierCV().fit(X, y) + >>> roc_auc_score(y, clf.decision_function(X), average=None) + array([0.81..., 0.84... , 0.93..., 0.87..., 0.94...]) + """ + + y_type = type_of_target(y_true, input_name="y_true") + y_true = check_array(y_true, ensure_2d=False, dtype=None) + y_score = check_array(y_score, ensure_2d=False) + + if y_type == "multiclass" or ( + y_type == "binary" and y_score.ndim == 2 and y_score.shape[1] > 2 + ): + # do not support partial ROC computation for multiclass + if max_fpr is not None and max_fpr != 1.0: + raise ValueError( + "Partial AUC computation not available in " + "multiclass setting, 'max_fpr' must be" + " set to `None`, received `max_fpr={0}` " + "instead".format(max_fpr) + ) + if multi_class == "raise": + raise ValueError("multi_class must be in ('ovo', 'ovr')") + return _multiclass_roc_auc_score( + y_true, y_score, labels, multi_class, average, sample_weight + ) + elif y_type == "binary": + labels = np.unique(y_true) + y_true = label_binarize(y_true, classes=labels)[:, 0] + return _average_binary_score( + partial(_binary_roc_auc_score, max_fpr=max_fpr), + y_true, + y_score, + average, + sample_weight=sample_weight, + ) + else: # multilabel-indicator + return _average_binary_score( + partial(_binary_roc_auc_score, max_fpr=max_fpr), + y_true, + y_score, + average, + sample_weight=sample_weight, + ) + + +def _multiclass_roc_auc_score( + y_true, y_score, labels, multi_class, average, sample_weight +): + """Multiclass roc auc score. + + Parameters + ---------- + y_true : array-like of shape (n_samples,) + True multiclass labels. + + y_score : array-like of shape (n_samples, n_classes) + Target scores corresponding to probability estimates of a sample + belonging to a particular class + + labels : array-like of shape (n_classes,) or None + List of labels to index ``y_score`` used for multiclass. 
If ``None``, + the lexical order of ``y_true`` is used to index ``y_score``. + + multi_class : {'ovr', 'ovo'} + Determines the type of multiclass configuration to use. + ``'ovr'``: + Calculate metrics for the multiclass case using the one-vs-rest + approach. + ``'ovo'``: + Calculate metrics for the multiclass case using the one-vs-one + approach. + + average : {'micro', 'macro', 'weighted'} + Determines the type of averaging performed on the pairwise binary + metric scores + ``'micro'``: + Calculate metrics for the binarized-raveled classes. Only supported + for `multi_class='ovr'`. + + .. versionadded:: 1.2 + + ``'macro'``: + Calculate metrics for each label, and find their unweighted + mean. This does not take label imbalance into account. Classes + are assumed to be uniformly distributed. + ``'weighted'``: + Calculate metrics for each label, taking into account the + prevalence of the classes. + + sample_weight : array-like of shape (n_samples,) or None + Sample weights. + + """ + # validation of the input y_score + if not np.allclose(1, y_score.sum(axis=1)): + raise ValueError( + "Target scores need to be probabilities for multiclass " + "roc_auc, i.e. they should sum up to 1.0 over classes" + ) + + # validation for multiclass parameter specifications + average_options = ("macro", "weighted", None) + if multi_class == "ovr": + average_options = ("micro",) + average_options + if average not in average_options: + raise ValueError( + "average must be one of {0} for multiclass problems".format(average_options) + ) + + multiclass_options = ("ovo", "ovr") + if multi_class not in multiclass_options: + raise ValueError( + "multi_class='{0}' is not supported " + "for multiclass ROC AUC, multi_class must be " + "in {1}".format(multi_class, multiclass_options) + ) + + if average is None and multi_class == "ovo": + raise NotImplementedError( + "average=None is not implemented for multi_class='ovo'." + ) + + if labels is not None: + labels = column_or_1d(labels) + classes = _unique(labels) + if len(classes) != len(labels): + raise ValueError("Parameter 'labels' must be unique") + if not np.array_equal(classes, labels): + raise ValueError("Parameter 'labels' must be ordered") + if len(classes) != y_score.shape[1]: + raise ValueError( + "Number of given labels, {0}, not equal to the number " + "of columns in 'y_score', {1}".format(len(classes), y_score.shape[1]) + ) + if len(np.setdiff1d(y_true, classes)): + raise ValueError("'y_true' contains labels not in parameter 'labels'") + else: + classes = _unique(y_true) + if len(classes) != y_score.shape[1]: + raise ValueError( + "Number of classes in y_true not equal to the number of " + "columns in 'y_score'" + ) + + if multi_class == "ovo": + if sample_weight is not None: + raise ValueError( + "sample_weight is not supported " + "for multiclass one-vs-one ROC AUC, " + "'sample_weight' must be None in this case." + ) + y_true_encoded = _encode(y_true, uniques=classes) + # Hand & Till (2001) implementation (ovo) + return _average_multiclass_ovo_score( + _binary_roc_auc_score, y_true_encoded, y_score, average=average + ) + else: + # ovr is same as multi-label + y_true_multilabel = label_binarize(y_true, classes=classes) + return _average_binary_score( + _binary_roc_auc_score, + y_true_multilabel, + y_score, + average, + sample_weight=sample_weight, + ) + + +def _binary_clf_curve(y_true, y_score, pos_label=None, sample_weight=None): + """Calculate true and false positives per binary classification threshold. 
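A hedged sketch of the quantity this private helper accumulates, written in plain NumPy on toy data; unlike the real helper it keeps one entry per sample instead of collapsing tied scores into distinct thresholds:

>>> import numpy as np
>>> y_true = np.array([0, 0, 1, 1])
>>> y_score = np.array([0.1, 0.4, 0.35, 0.8])
>>> order = np.argsort(y_score)[::-1]   # sort samples by decreasing score
>>> y_sorted = y_true[order]
>>> tps = np.cumsum(y_sorted)           # positives scored at or above each cut-off
>>> fps = np.cumsum(1 - y_sorted)       # negatives scored at or above each cut-off
>>> tps, fps
(array([1, 1, 2, 2]), array([0, 1, 1, 2]))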
+ + Parameters + ---------- + y_true : ndarray of shape (n_samples,) + True targets of binary classification. + + y_score : ndarray of shape (n_samples,) + Estimated probabilities or output of a decision function. + + pos_label : int, float, bool or str, default=None + The label of the positive class. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. + + Returns + ------- + fps : ndarray of shape (n_thresholds,) + A count of false positives, at index i being the number of negative + samples assigned a score >= thresholds[i]. The total number of + negative samples is equal to fps[-1] (thus true negatives are given by + fps[-1] - fps). + + tps : ndarray of shape (n_thresholds,) + An increasing count of true positives, at index i being the number + of positive samples assigned a score >= thresholds[i]. The total + number of positive samples is equal to tps[-1] (thus false negatives + are given by tps[-1] - tps). + + thresholds : ndarray of shape (n_thresholds,) + Decreasing score values. + """ + # Check to make sure y_true is valid + y_type = type_of_target(y_true, input_name="y_true") + if not (y_type == "binary" or (y_type == "multiclass" and pos_label is not None)): + raise ValueError("{0} format is not supported".format(y_type)) + + check_consistent_length(y_true, y_score, sample_weight) + y_true = column_or_1d(y_true) + y_score = column_or_1d(y_score) + assert_all_finite(y_true) + assert_all_finite(y_score) + + # Filter out zero-weighted samples, as they should not impact the result + if sample_weight is not None: + sample_weight = column_or_1d(sample_weight) + sample_weight = _check_sample_weight(sample_weight, y_true) + nonzero_weight_mask = sample_weight != 0 + y_true = y_true[nonzero_weight_mask] + y_score = y_score[nonzero_weight_mask] + sample_weight = sample_weight[nonzero_weight_mask] + + pos_label = _check_pos_label_consistency(pos_label, y_true) + + # make y_true a boolean vector + y_true = y_true == pos_label + + # sort scores and corresponding truth values + desc_score_indices = np.argsort(y_score, kind="mergesort")[::-1] + y_score = y_score[desc_score_indices] + y_true = y_true[desc_score_indices] + if sample_weight is not None: + weight = sample_weight[desc_score_indices] + else: + weight = 1.0 + + # y_score typically has many tied values. Here we extract + # the indices associated with the distinct values. We also + # concatenate a value for the end of the curve. + distinct_value_indices = np.where(np.diff(y_score))[0] + threshold_idxs = np.r_[distinct_value_indices, y_true.size - 1] + + # accumulate the true positives with decreasing threshold + tps = stable_cumsum(y_true * weight)[threshold_idxs] + if sample_weight is not None: + # express fps as a cumsum to ensure fps is increasing even in + # the presence of floating point errors + fps = stable_cumsum((1 - y_true) * weight)[threshold_idxs] + else: + fps = 1 + threshold_idxs - tps + return fps, tps, y_score[threshold_idxs] + + +@validate_params( + { + "y_true": ["array-like"], + "probas_pred": ["array-like"], + "pos_label": [Real, str, "boolean", None], + "sample_weight": ["array-like", None], + "drop_intermediate": ["boolean"], + }, + prefer_skip_nested_validation=True, +) +def precision_recall_curve( + y_true, probas_pred, *, pos_label=None, sample_weight=None, drop_intermediate=False +): + """Compute precision-recall pairs for different probability thresholds. + + Note: this implementation is restricted to the binary classification task. 
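To connect this curve back to the uninterpolated average precision defined earlier in this module, the step-function integral can be reproduced by hand. A minimal sketch on the same toy data as the Examples below (outputs abbreviated with ...):

>>> import numpy as np
>>> from sklearn.metrics import average_precision_score, precision_recall_curve
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> precision, recall, _ = precision_recall_curve(y_true, y_scores)
>>> # AP = sum_n (R_n - R_{n-1}) * P_n, i.e. the area under the step function
>>> float(-np.sum(np.diff(recall) * precision[:-1]))
0.83...
>>> float(average_precision_score(y_true, y_scores))
0.83...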
+ + The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of + true positives and ``fp`` the number of false positives. The precision is + intuitively the ability of the classifier not to label as positive a sample + that is negative. + + The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of + true positives and ``fn`` the number of false negatives. The recall is + intuitively the ability of the classifier to find all the positive samples. + + The last precision and recall values are 1. and 0. respectively and do not + have a corresponding threshold. This ensures that the graph starts on the + y axis. + + The first precision and recall values are precision=class balance and recall=1.0 + which corresponds to a classifier that always predicts the positive class. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + y_true : array-like of shape (n_samples,) + True binary labels. If labels are not either {-1, 1} or {0, 1}, then + pos_label should be explicitly given. + + probas_pred : array-like of shape (n_samples,) + Target scores, can either be probability estimates of the positive + class, or non-thresholded measure of decisions (as returned by + `decision_function` on some classifiers). + + pos_label : int, float, bool or str, default=None + The label of the positive class. + When ``pos_label=None``, if y_true is in {-1, 1} or {0, 1}, + ``pos_label`` is set to 1, otherwise an error will be raised. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. + + drop_intermediate : bool, default=False + Whether to drop some suboptimal thresholds which would not appear + on a plotted precision-recall curve. This is useful in order to create + lighter precision-recall curves. + + .. versionadded:: 1.3 + + Returns + ------- + precision : ndarray of shape (n_thresholds + 1,) + Precision values such that element i is the precision of + predictions with score >= thresholds[i] and the last element is 1. + + recall : ndarray of shape (n_thresholds + 1,) + Decreasing recall values such that element i is the recall of + predictions with score >= thresholds[i] and the last element is 0. + + thresholds : ndarray of shape (n_thresholds,) + Increasing thresholds on the decision function used to compute + precision and recall where `n_thresholds = len(np.unique(probas_pred))`. + + See Also + -------- + PrecisionRecallDisplay.from_estimator : Plot Precision Recall Curve given + a binary classifier. + PrecisionRecallDisplay.from_predictions : Plot Precision Recall Curve + using predictions from a binary classifier. + average_precision_score : Compute average precision from prediction scores. + det_curve: Compute error rates for different probability thresholds. + roc_curve : Compute Receiver operating characteristic (ROC) curve. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.metrics import precision_recall_curve + >>> y_true = np.array([0, 0, 1, 1]) + >>> y_scores = np.array([0.1, 0.4, 0.35, 0.8]) + >>> precision, recall, thresholds = precision_recall_curve( + ... y_true, y_scores) + >>> precision + array([0.5 , 0.66666667, 0.5 , 1. , 1. ]) + >>> recall + array([1. , 1. , 0.5, 0.5, 0. 
]) + >>> thresholds + array([0.1 , 0.35, 0.4 , 0.8 ]) + """ + fps, tps, thresholds = _binary_clf_curve( + y_true, probas_pred, pos_label=pos_label, sample_weight=sample_weight + ) + + if drop_intermediate and len(fps) > 2: + # Drop thresholds corresponding to points where true positives (tps) + # do not change from the previous or subsequent point. This will keep + # only the first and last point for each tps value. All points + # with the same tps value have the same recall and thus x coordinate. + # They appear as a vertical line on the plot. + optimal_idxs = np.where( + np.concatenate( + [[True], np.logical_or(np.diff(tps[:-1]), np.diff(tps[1:])), [True]] + ) + )[0] + fps = fps[optimal_idxs] + tps = tps[optimal_idxs] + thresholds = thresholds[optimal_idxs] + + ps = tps + fps + # Initialize the result array with zeros to make sure that precision[ps == 0] + # does not contain uninitialized values. + precision = np.zeros_like(tps) + np.divide(tps, ps, out=precision, where=(ps != 0)) + + # When no positive label in y_true, recall is set to 1 for all thresholds + # tps[-1] == 0 <=> y_true == all negative labels + if tps[-1] == 0: + warnings.warn( + "No positive class found in y_true, " + "recall is set to one for all thresholds." + ) + recall = np.ones_like(tps) + else: + recall = tps / tps[-1] + + # reverse the outputs so recall is decreasing + sl = slice(None, None, -1) + return np.hstack((precision[sl], 1)), np.hstack((recall[sl], 0)), thresholds[sl] + + +@validate_params( + { + "y_true": ["array-like"], + "y_score": ["array-like"], + "pos_label": [Real, str, "boolean", None], + "sample_weight": ["array-like", None], + "drop_intermediate": ["boolean"], + }, + prefer_skip_nested_validation=True, +) +def roc_curve( + y_true, y_score, *, pos_label=None, sample_weight=None, drop_intermediate=True +): + """Compute Receiver operating characteristic (ROC). + + Note: this implementation is restricted to the binary classification task. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + y_true : array-like of shape (n_samples,) + True binary labels. If labels are not either {-1, 1} or {0, 1}, then + pos_label should be explicitly given. + + y_score : array-like of shape (n_samples,) + Target scores, can either be probability estimates of the positive + class, confidence values, or non-thresholded measure of decisions + (as returned by "decision_function" on some classifiers). + + pos_label : int, float, bool or str, default=None + The label of the positive class. + When ``pos_label=None``, if `y_true` is in {-1, 1} or {0, 1}, + ``pos_label`` is set to 1, otherwise an error will be raised. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. + + drop_intermediate : bool, default=True + Whether to drop some suboptimal thresholds which would not appear + on a plotted ROC curve. This is useful in order to create lighter + ROC curves. + + .. versionadded:: 0.17 + parameter *drop_intermediate*. + + Returns + ------- + fpr : ndarray of shape (>2,) + Increasing false positive rates such that element i is the false + positive rate of predictions with score >= `thresholds[i]`. + + tpr : ndarray of shape (>2,) + Increasing true positive rates such that element `i` is the true + positive rate of predictions with score >= `thresholds[i]`. + + thresholds : ndarray of shape (n_thresholds,) + Decreasing thresholds on the decision function used to compute + fpr and tpr. `thresholds[0]` represents no instances being predicted + and is arbitrarily set to `np.inf`. 
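A small sketch on toy data of how the padded `np.inf` threshold shows up and how `roc_curve`, `auc` and `roc_auc_score` fit together; the printed values are what one would expect for this particular example:

>>> import numpy as np
>>> from sklearn.metrics import auc, roc_auc_score, roc_curve
>>> y = np.array([0, 0, 1, 1])
>>> scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> fpr, tpr, thresholds = roc_curve(y, scores)
>>> bool(np.isinf(thresholds[0]))   # synthetic threshold so the curve starts at (0, 0)
True
>>> float(auc(fpr, tpr)), float(roc_auc_score(y, scores))
(0.75, 0.75)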
+ + See Also + -------- + RocCurveDisplay.from_estimator : Plot Receiver Operating Characteristic + (ROC) curve given an estimator and some data. + RocCurveDisplay.from_predictions : Plot Receiver Operating Characteristic + (ROC) curve given the true and predicted values. + det_curve: Compute error rates for different probability thresholds. + roc_auc_score : Compute the area under the ROC curve. + + Notes + ----- + Since the thresholds are sorted from low to high values, they + are reversed upon returning them to ensure they correspond to both ``fpr`` + and ``tpr``, which are sorted in reversed order during their calculation. + + An arbitrary threshold is added for the case `tpr=0` and `fpr=0` to + ensure that the curve starts at `(0, 0)`. This threshold corresponds to the + `np.inf`. + + References + ---------- + .. [1] `Wikipedia entry for the Receiver operating characteristic + `_ + + .. [2] Fawcett T. An introduction to ROC analysis[J]. Pattern Recognition + Letters, 2006, 27(8):861-874. + + Examples + -------- + >>> import numpy as np + >>> from sklearn import metrics + >>> y = np.array([1, 1, 2, 2]) + >>> scores = np.array([0.1, 0.4, 0.35, 0.8]) + >>> fpr, tpr, thresholds = metrics.roc_curve(y, scores, pos_label=2) + >>> fpr + array([0. , 0. , 0.5, 0.5, 1. ]) + >>> tpr + array([0. , 0.5, 0.5, 1. , 1. ]) + >>> thresholds + array([ inf, 0.8 , 0.4 , 0.35, 0.1 ]) + """ + fps, tps, thresholds = _binary_clf_curve( + y_true, y_score, pos_label=pos_label, sample_weight=sample_weight + ) + + # Attempt to drop thresholds corresponding to points in between and + # collinear with other points. These are always suboptimal and do not + # appear on a plotted ROC curve (and thus do not affect the AUC). + # Here np.diff(_, 2) is used as a "second derivative" to tell if there + # is a corner at the point. Both fps and tps must be tested to handle + # thresholds with multiple data points (which are combined in + # _binary_clf_curve). This keeps all cases where the point should be kept, + # but does not drop more complicated cases like fps = [1, 3, 7], + # tps = [1, 2, 4]; there is no harm in keeping too many thresholds. + if drop_intermediate and len(fps) > 2: + optimal_idxs = np.where( + np.r_[True, np.logical_or(np.diff(fps, 2), np.diff(tps, 2)), True] + )[0] + fps = fps[optimal_idxs] + tps = tps[optimal_idxs] + thresholds = thresholds[optimal_idxs] + + # Add an extra threshold position + # to make sure that the curve starts at (0, 0) + tps = np.r_[0, tps] + fps = np.r_[0, fps] + # get dtype of `y_score` even if it is an array-like + thresholds = np.r_[np.inf, thresholds] + + if fps[-1] <= 0: + warnings.warn( + "No negative samples in y_true, false positive value should be meaningless", + UndefinedMetricWarning, + ) + fpr = np.repeat(np.nan, fps.shape) + else: + fpr = fps / fps[-1] + + if tps[-1] <= 0: + warnings.warn( + "No positive samples in y_true, true positive value should be meaningless", + UndefinedMetricWarning, + ) + tpr = np.repeat(np.nan, tps.shape) + else: + tpr = tps / tps[-1] + + return fpr, tpr, thresholds + + +@validate_params( + { + "y_true": ["array-like", "sparse matrix"], + "y_score": ["array-like"], + "sample_weight": ["array-like", None], + }, + prefer_skip_nested_validation=True, +) +def label_ranking_average_precision_score(y_true, y_score, *, sample_weight=None): + """Compute ranking-based average precision. + + Label ranking average precision (LRAP) is the average over each ground + truth label assigned to each sample, of the ratio of true vs. 
total + labels with lower score. + + This metric is used in multilabel ranking problem, where the goal + is to give better rank to the labels associated to each sample. + + The obtained score is always strictly greater than 0 and + the best value is 1. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + y_true : {array-like, sparse matrix} of shape (n_samples, n_labels) + True binary labels in binary indicator format. + + y_score : array-like of shape (n_samples, n_labels) + Target scores, can either be probability estimates of the positive + class, confidence values, or non-thresholded measure of decisions + (as returned by "decision_function" on some classifiers). + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. + + .. versionadded:: 0.20 + + Returns + ------- + score : float + Ranking-based average precision score. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.metrics import label_ranking_average_precision_score + >>> y_true = np.array([[1, 0, 0], [0, 0, 1]]) + >>> y_score = np.array([[0.75, 0.5, 1], [1, 0.2, 0.1]]) + >>> label_ranking_average_precision_score(y_true, y_score) + 0.416... + """ + check_consistent_length(y_true, y_score, sample_weight) + y_true = check_array(y_true, ensure_2d=False, accept_sparse="csr") + y_score = check_array(y_score, ensure_2d=False) + + if y_true.shape != y_score.shape: + raise ValueError("y_true and y_score have different shape") + + # Handle badly formatted array and the degenerate case with one label + y_type = type_of_target(y_true, input_name="y_true") + if y_type != "multilabel-indicator" and not ( + y_type == "binary" and y_true.ndim == 2 + ): + raise ValueError("{0} format is not supported".format(y_type)) + + if not issparse(y_true): + y_true = csr_matrix(y_true) + + y_score = -y_score + + n_samples, n_labels = y_true.shape + + out = 0.0 + for i, (start, stop) in enumerate(zip(y_true.indptr, y_true.indptr[1:])): + relevant = y_true.indices[start:stop] + + if relevant.size == 0 or relevant.size == n_labels: + # If all labels are relevant or unrelevant, the score is also + # equal to 1. The label ranking has no meaning. + aux = 1.0 + else: + scores_i = y_score[i] + rank = rankdata(scores_i, "max")[relevant] + L = rankdata(scores_i[relevant], "max") + aux = (L / rank).mean() + + if sample_weight is not None: + aux = aux * sample_weight[i] + out += aux + + if sample_weight is None: + out /= n_samples + else: + out /= np.sum(sample_weight) + + return out + + +@validate_params( + { + "y_true": ["array-like"], + "y_score": ["array-like"], + "sample_weight": ["array-like", None], + }, + prefer_skip_nested_validation=True, +) +def coverage_error(y_true, y_score, *, sample_weight=None): + """Coverage error measure. + + Compute how far we need to go through the ranked scores to cover all + true labels. The best value is equal to the average number + of labels in ``y_true`` per sample. + + Ties in ``y_scores`` are broken by giving maximal rank that would have + been assigned to all tied values. + + Note: Our implementation's score is 1 greater than the one given in + Tsoumakas et al., 2010. This extends it to handle the degenerate case + in which an instance has 0 true labels. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + y_true : array-like of shape (n_samples, n_labels) + True binary labels in binary indicator format. 
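A rough hand re-computation of the coverage definition above on toy data without ties; this sketch is for intuition only and is not how the function is implemented:

>>> import numpy as np
>>> from sklearn.metrics import coverage_error
>>> y_true = np.array([[1, 0, 0], [0, 1, 1]])
>>> y_score = np.array([[0.9, 0.3, 0.1], [0.1, 0.8, 0.4]])
>>> ranks = (-y_score).argsort(axis=1).argsort(axis=1) + 1   # 1-based rank of every label
>>> worst_relevant_rank = np.where(y_true == 1, ranks, 0).max(axis=1)
>>> worst_relevant_rank, float(worst_relevant_rank.mean())
(array([1, 2]), 1.5)
>>> float(coverage_error(y_true, y_score))
1.5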
+ + y_score : array-like of shape (n_samples, n_labels) + Target scores, can either be probability estimates of the positive + class, confidence values, or non-thresholded measure of decisions + (as returned by "decision_function" on some classifiers). + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. + + Returns + ------- + coverage_error : float + The coverage error. + + References + ---------- + .. [1] Tsoumakas, G., Katakis, I., & Vlahavas, I. (2010). + Mining multi-label data. In Data mining and knowledge discovery + handbook (pp. 667-685). Springer US. + + Examples + -------- + >>> from sklearn.metrics import coverage_error + >>> y_true = [[1, 0, 0], [0, 1, 1]] + >>> y_score = [[1, 0, 0], [0, 1, 1]] + >>> coverage_error(y_true, y_score) + 1.5 + """ + y_true = check_array(y_true, ensure_2d=True) + y_score = check_array(y_score, ensure_2d=True) + check_consistent_length(y_true, y_score, sample_weight) + + y_type = type_of_target(y_true, input_name="y_true") + if y_type != "multilabel-indicator": + raise ValueError("{0} format is not supported".format(y_type)) + + if y_true.shape != y_score.shape: + raise ValueError("y_true and y_score have different shape") + + y_score_mask = np.ma.masked_array(y_score, mask=np.logical_not(y_true)) + y_min_relevant = y_score_mask.min(axis=1).reshape((-1, 1)) + coverage = (y_score >= y_min_relevant).sum(axis=1) + coverage = coverage.filled(0) + + return np.average(coverage, weights=sample_weight) + + +@validate_params( + { + "y_true": ["array-like", "sparse matrix"], + "y_score": ["array-like"], + "sample_weight": ["array-like", None], + }, + prefer_skip_nested_validation=True, +) +def label_ranking_loss(y_true, y_score, *, sample_weight=None): + """Compute Ranking loss measure. + + Compute the average number of label pairs that are incorrectly ordered + given y_score weighted by the size of the label set and the number of + labels not in the label set. + + This is similar to the error set size, but weighted by the number of + relevant and irrelevant labels. The best performance is achieved with + a ranking loss of zero. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.17 + A function *label_ranking_loss* + + Parameters + ---------- + y_true : {array-like, sparse matrix} of shape (n_samples, n_labels) + True binary labels in binary indicator format. + + y_score : array-like of shape (n_samples, n_labels) + Target scores, can either be probability estimates of the positive + class, confidence values, or non-thresholded measure of decisions + (as returned by "decision_function" on some classifiers). + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. + + Returns + ------- + loss : float + Average number of label pairs that are incorrectly ordered given + y_score weighted by the size of the label set and the number of labels not + in the label set. + + References + ---------- + .. [1] Tsoumakas, G., Katakis, I., & Vlahavas, I. (2010). + Mining multi-label data. In Data mining and knowledge discovery + handbook (pp. 667-685). Springer US. + + Examples + -------- + >>> from sklearn.metrics import label_ranking_loss + >>> y_true = [[1, 0, 0], [0, 0, 1]] + >>> y_score = [[0.75, 0.5, 1], [1, 0.2, 0.1]] + >>> label_ranking_loss(y_true, y_score) + 0.75... 
+ """ + y_true = check_array(y_true, ensure_2d=False, accept_sparse="csr") + y_score = check_array(y_score, ensure_2d=False) + check_consistent_length(y_true, y_score, sample_weight) + + y_type = type_of_target(y_true, input_name="y_true") + if y_type not in ("multilabel-indicator",): + raise ValueError("{0} format is not supported".format(y_type)) + + if y_true.shape != y_score.shape: + raise ValueError("y_true and y_score have different shape") + + n_samples, n_labels = y_true.shape + + y_true = csr_matrix(y_true) + + loss = np.zeros(n_samples) + for i, (start, stop) in enumerate(zip(y_true.indptr, y_true.indptr[1:])): + # Sort and bin the label scores + unique_scores, unique_inverse = np.unique(y_score[i], return_inverse=True) + true_at_reversed_rank = np.bincount( + unique_inverse[y_true.indices[start:stop]], minlength=len(unique_scores) + ) + all_at_reversed_rank = np.bincount(unique_inverse, minlength=len(unique_scores)) + false_at_reversed_rank = all_at_reversed_rank - true_at_reversed_rank + + # if the scores are ordered, it's possible to count the number of + # incorrectly ordered paires in linear time by cumulatively counting + # how many false labels of a given score have a score higher than the + # accumulated true labels with lower score. + loss[i] = np.dot(true_at_reversed_rank.cumsum(), false_at_reversed_rank) + + n_positives = count_nonzero(y_true, axis=1) + with np.errstate(divide="ignore", invalid="ignore"): + loss /= (n_labels - n_positives) * n_positives + + # When there is no positive or no negative labels, those values should + # be consider as correct, i.e. the ranking doesn't matter. + loss[np.logical_or(n_positives == 0, n_positives == n_labels)] = 0.0 + + return np.average(loss, weights=sample_weight) + + +def _dcg_sample_scores(y_true, y_score, k=None, log_base=2, ignore_ties=False): + """Compute Discounted Cumulative Gain. + + Sum the true scores ranked in the order induced by the predicted scores, + after applying a logarithmic discount. + + This ranking metric yields a high value if true labels are ranked high by + ``y_score``. + + Parameters + ---------- + y_true : ndarray of shape (n_samples, n_labels) + True targets of multilabel classification, or true scores of entities + to be ranked. + + y_score : ndarray of shape (n_samples, n_labels) + Target scores, can either be probability estimates, confidence values, + or non-thresholded measure of decisions (as returned by + "decision_function" on some classifiers). + + k : int, default=None + Only consider the highest k scores in the ranking. If `None`, use all + outputs. + + log_base : float, default=2 + Base of the logarithm used for the discount. A low value means a + sharper discount (top results are more important). + + ignore_ties : bool, default=False + Assume that there are no ties in y_score (which is likely to be the + case if y_score is continuous) for efficiency gains. + + Returns + ------- + discounted_cumulative_gain : ndarray of shape (n_samples,) + The DCG score for each sample. + + See Also + -------- + ndcg_score : The Discounted Cumulative Gain divided by the Ideal Discounted + Cumulative Gain (the DCG obtained for a perfect ranking), in order to + have a score between 0 and 1. 
+ """ + discount = 1 / (np.log(np.arange(y_true.shape[1]) + 2) / np.log(log_base)) + if k is not None: + discount[k:] = 0 + if ignore_ties: + ranking = np.argsort(y_score)[:, ::-1] + ranked = y_true[np.arange(ranking.shape[0])[:, np.newaxis], ranking] + cumulative_gains = discount.dot(ranked.T) + else: + discount_cumsum = np.cumsum(discount) + cumulative_gains = [ + _tie_averaged_dcg(y_t, y_s, discount_cumsum) + for y_t, y_s in zip(y_true, y_score) + ] + cumulative_gains = np.asarray(cumulative_gains) + return cumulative_gains + + +def _tie_averaged_dcg(y_true, y_score, discount_cumsum): + """ + Compute DCG by averaging over possible permutations of ties. + + The gain (`y_true`) of an index falling inside a tied group (in the order + induced by `y_score`) is replaced by the average gain within this group. + The discounted gain for a tied group is then the average `y_true` within + this group times the sum of discounts of the corresponding ranks. + + This amounts to averaging scores for all possible orderings of the tied + groups. + + (note in the case of dcg@k the discount is 0 after index k) + + Parameters + ---------- + y_true : ndarray + The true relevance scores. + + y_score : ndarray + Predicted scores. + + discount_cumsum : ndarray + Precomputed cumulative sum of the discounts. + + Returns + ------- + discounted_cumulative_gain : float + The discounted cumulative gain. + + References + ---------- + McSherry, F., & Najork, M. (2008, March). Computing information retrieval + performance measures efficiently in the presence of tied scores. In + European conference on information retrieval (pp. 414-421). Springer, + Berlin, Heidelberg. + """ + _, inv, counts = np.unique(-y_score, return_inverse=True, return_counts=True) + ranked = np.zeros(len(counts)) + np.add.at(ranked, inv, y_true) + ranked /= counts + groups = np.cumsum(counts) - 1 + discount_sums = np.empty(len(counts)) + discount_sums[0] = discount_cumsum[groups[0]] + discount_sums[1:] = np.diff(discount_cumsum[groups]) + return (ranked * discount_sums).sum() + + +def _check_dcg_target_type(y_true): + y_type = type_of_target(y_true, input_name="y_true") + supported_fmt = ( + "multilabel-indicator", + "continuous-multioutput", + "multiclass-multioutput", + ) + if y_type not in supported_fmt: + raise ValueError( + "Only {} formats are supported. Got {} instead".format( + supported_fmt, y_type + ) + ) + + +@validate_params( + { + "y_true": ["array-like"], + "y_score": ["array-like"], + "k": [Interval(Integral, 1, None, closed="left"), None], + "log_base": [Interval(Real, 0.0, None, closed="neither")], + "sample_weight": ["array-like", None], + "ignore_ties": ["boolean"], + }, + prefer_skip_nested_validation=True, +) +def dcg_score( + y_true, y_score, *, k=None, log_base=2, sample_weight=None, ignore_ties=False +): + """Compute Discounted Cumulative Gain. + + Sum the true scores ranked in the order induced by the predicted scores, + after applying a logarithmic discount. + + This ranking metric yields a high value if true labels are ranked high by + ``y_score``. + + Usually the Normalized Discounted Cumulative Gain (NDCG, computed by + ndcg_score) is preferred. + + Parameters + ---------- + y_true : array-like of shape (n_samples, n_labels) + True targets of multilabel classification, or true scores of entities + to be ranked. 
+ + y_score : array-like of shape (n_samples, n_labels) + Target scores, can either be probability estimates, confidence values, + or non-thresholded measure of decisions (as returned by + "decision_function" on some classifiers). + + k : int, default=None + Only consider the highest k scores in the ranking. If None, use all + outputs. + + log_base : float, default=2 + Base of the logarithm used for the discount. A low value means a + sharper discount (top results are more important). + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. If `None`, all samples are given the same weight. + + ignore_ties : bool, default=False + Assume that there are no ties in y_score (which is likely to be the + case if y_score is continuous) for efficiency gains. + + Returns + ------- + discounted_cumulative_gain : float + The averaged sample DCG scores. + + See Also + -------- + ndcg_score : The Discounted Cumulative Gain divided by the Ideal Discounted + Cumulative Gain (the DCG obtained for a perfect ranking), in order to + have a score between 0 and 1. + + References + ---------- + `Wikipedia entry for Discounted Cumulative Gain + `_. + + Jarvelin, K., & Kekalainen, J. (2002). + Cumulated gain-based evaluation of IR techniques. ACM Transactions on + Information Systems (TOIS), 20(4), 422-446. + + Wang, Y., Wang, L., Li, Y., He, D., Chen, W., & Liu, T. Y. (2013, May). + A theoretical analysis of NDCG ranking measures. In Proceedings of the 26th + Annual Conference on Learning Theory (COLT 2013). + + McSherry, F., & Najork, M. (2008, March). Computing information retrieval + performance measures efficiently in the presence of tied scores. In + European conference on information retrieval (pp. 414-421). Springer, + Berlin, Heidelberg. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.metrics import dcg_score + >>> # we have groud-truth relevance of some answers to a query: + >>> true_relevance = np.asarray([[10, 0, 0, 1, 5]]) + >>> # we predict scores for the answers + >>> scores = np.asarray([[.1, .2, .3, 4, 70]]) + >>> dcg_score(true_relevance, scores) + 9.49... + >>> # we can set k to truncate the sum; only top k answers contribute + >>> dcg_score(true_relevance, scores, k=2) + 5.63... + >>> # now we have some ties in our prediction + >>> scores = np.asarray([[1, 0, 0, 0, 1]]) + >>> # by default ties are averaged, so here we get the average true + >>> # relevance of our top predictions: (10 + 5) / 2 = 7.5 + >>> dcg_score(true_relevance, scores, k=1) + 7.5 + >>> # we can choose to ignore ties for faster results, but only + >>> # if we know there aren't ties in our scores, otherwise we get + >>> # wrong results: + >>> dcg_score(true_relevance, + ... scores, k=1, ignore_ties=True) + 5.0 + """ + y_true = check_array(y_true, ensure_2d=False) + y_score = check_array(y_score, ensure_2d=False) + check_consistent_length(y_true, y_score, sample_weight) + _check_dcg_target_type(y_true) + return np.average( + _dcg_sample_scores( + y_true, y_score, k=k, log_base=log_base, ignore_ties=ignore_ties + ), + weights=sample_weight, + ) + + +def _ndcg_sample_scores(y_true, y_score, k=None, ignore_ties=False): + """Compute Normalized Discounted Cumulative Gain. + + Sum the true scores ranked in the order induced by the predicted scores, + after applying a logarithmic discount. Then divide by the best possible + score (Ideal DCG, obtained for a perfect ranking) to obtain a score between + 0 and 1. 
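Equivalently, NDCG is the DCG of the predicted ordering divided by the ideal DCG obtained by ranking with the true relevances themselves; a minimal sketch using the public functions on toy data:

>>> import numpy as np
>>> from sklearn.metrics import dcg_score, ndcg_score
>>> true_relevance = np.asarray([[10, 0, 0, 1, 5]])
>>> scores = np.asarray([[.1, .2, .3, 4, 70]])
>>> ratio = dcg_score(true_relevance, scores) / dcg_score(true_relevance, true_relevance)
>>> bool(np.isclose(ratio, ndcg_score(true_relevance, scores)))
True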
+ + This ranking metric yields a high value if true labels are ranked high by + ``y_score``. + + Parameters + ---------- + y_true : ndarray of shape (n_samples, n_labels) + True targets of multilabel classification, or true scores of entities + to be ranked. + + y_score : ndarray of shape (n_samples, n_labels) + Target scores, can either be probability estimates, confidence values, + or non-thresholded measure of decisions (as returned by + "decision_function" on some classifiers). + + k : int, default=None + Only consider the highest k scores in the ranking. If None, use all + outputs. + + ignore_ties : bool, default=False + Assume that there are no ties in y_score (which is likely to be the + case if y_score is continuous) for efficiency gains. + + Returns + ------- + normalized_discounted_cumulative_gain : ndarray of shape (n_samples,) + The NDCG score for each sample (float in [0., 1.]). + + See Also + -------- + dcg_score : Discounted Cumulative Gain (not normalized). + + """ + gain = _dcg_sample_scores(y_true, y_score, k, ignore_ties=ignore_ties) + # Here we use the order induced by y_true so we can ignore ties since + # the gain associated to tied indices is the same (permuting ties doesn't + # change the value of the re-ordered y_true) + normalizing_gain = _dcg_sample_scores(y_true, y_true, k, ignore_ties=True) + all_irrelevant = normalizing_gain == 0 + gain[all_irrelevant] = 0 + gain[~all_irrelevant] /= normalizing_gain[~all_irrelevant] + return gain + + +@validate_params( + { + "y_true": ["array-like"], + "y_score": ["array-like"], + "k": [Interval(Integral, 1, None, closed="left"), None], + "sample_weight": ["array-like", None], + "ignore_ties": ["boolean"], + }, + prefer_skip_nested_validation=True, +) +def ndcg_score(y_true, y_score, *, k=None, sample_weight=None, ignore_ties=False): + """Compute Normalized Discounted Cumulative Gain. + + Sum the true scores ranked in the order induced by the predicted scores, + after applying a logarithmic discount. Then divide by the best possible + score (Ideal DCG, obtained for a perfect ranking) to obtain a score between + 0 and 1. + + This ranking metric returns a high value if true labels are ranked high by + ``y_score``. + + Parameters + ---------- + y_true : array-like of shape (n_samples, n_labels) + True targets of multilabel classification, or true scores of entities + to be ranked. Negative values in `y_true` may result in an output + that is not between 0 and 1. + + y_score : array-like of shape (n_samples, n_labels) + Target scores, can either be probability estimates, confidence values, + or non-thresholded measure of decisions (as returned by + "decision_function" on some classifiers). + + k : int, default=None + Only consider the highest k scores in the ranking. If `None`, use all + outputs. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. If `None`, all samples are given the same weight. + + ignore_ties : bool, default=False + Assume that there are no ties in y_score (which is likely to be the + case if y_score is continuous) for efficiency gains. + + Returns + ------- + normalized_discounted_cumulative_gain : float in [0., 1.] + The averaged NDCG scores for all samples. + + See Also + -------- + dcg_score : Discounted Cumulative Gain (not normalized). + + References + ---------- + `Wikipedia entry for Discounted Cumulative Gain + `_ + + Jarvelin, K., & Kekalainen, J. (2002). + Cumulated gain-based evaluation of IR techniques. 
ACM Transactions on + Information Systems (TOIS), 20(4), 422-446. + + Wang, Y., Wang, L., Li, Y., He, D., Chen, W., & Liu, T. Y. (2013, May). + A theoretical analysis of NDCG ranking measures. In Proceedings of the 26th + Annual Conference on Learning Theory (COLT 2013) + + McSherry, F., & Najork, M. (2008, March). Computing information retrieval + performance measures efficiently in the presence of tied scores. In + European conference on information retrieval (pp. 414-421). Springer, + Berlin, Heidelberg. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.metrics import ndcg_score + >>> # we have groud-truth relevance of some answers to a query: + >>> true_relevance = np.asarray([[10, 0, 0, 1, 5]]) + >>> # we predict some scores (relevance) for the answers + >>> scores = np.asarray([[.1, .2, .3, 4, 70]]) + >>> ndcg_score(true_relevance, scores) + 0.69... + >>> scores = np.asarray([[.05, 1.1, 1., .5, .0]]) + >>> ndcg_score(true_relevance, scores) + 0.49... + >>> # we can set k to truncate the sum; only top k answers contribute. + >>> ndcg_score(true_relevance, scores, k=4) + 0.35... + >>> # the normalization takes k into account so a perfect answer + >>> # would still get 1.0 + >>> ndcg_score(true_relevance, true_relevance, k=4) + 1.0... + >>> # now we have some ties in our prediction + >>> scores = np.asarray([[1, 0, 0, 0, 1]]) + >>> # by default ties are averaged, so here we get the average (normalized) + >>> # true relevance of our top predictions: (10 / 10 + 5 / 10) / 2 = .75 + >>> ndcg_score(true_relevance, scores, k=1) + 0.75... + >>> # we can choose to ignore ties for faster results, but only + >>> # if we know there aren't ties in our scores, otherwise we get + >>> # wrong results: + >>> ndcg_score(true_relevance, + ... scores, k=1, ignore_ties=True) + 0.5... + """ + y_true = check_array(y_true, ensure_2d=False) + y_score = check_array(y_score, ensure_2d=False) + check_consistent_length(y_true, y_score, sample_weight) + + if y_true.min() < 0: + raise ValueError("ndcg_score should not be used on negative y_true values.") + if y_true.ndim > 1 and y_true.shape[1] <= 1: + raise ValueError( + "Computing NDCG is only meaningful when there is more than 1 document. " + f"Got {y_true.shape[1]} instead." + ) + _check_dcg_target_type(y_true) + gain = _ndcg_sample_scores(y_true, y_score, k=k, ignore_ties=ignore_ties) + return np.average(gain, weights=sample_weight) + + +@validate_params( + { + "y_true": ["array-like"], + "y_score": ["array-like"], + "k": [Interval(Integral, 1, None, closed="left")], + "normalize": ["boolean"], + "sample_weight": ["array-like", None], + "labels": ["array-like", None], + }, + prefer_skip_nested_validation=True, +) +def top_k_accuracy_score( + y_true, y_score, *, k=2, normalize=True, sample_weight=None, labels=None +): + """Top-k Accuracy classification score. + + This metric computes the number of times where the correct label is among + the top `k` labels predicted (ranked by predicted scores). Note that the + multilabel case isn't covered here. + + Read more in the :ref:`User Guide ` + + Parameters + ---------- + y_true : array-like of shape (n_samples,) + True labels. + + y_score : array-like of shape (n_samples,) or (n_samples, n_classes) + Target scores. These can be either probability estimates or + non-thresholded decision values (as returned by + :term:`decision_function` on some classifiers). + The binary case expects scores with shape (n_samples,) while the + multiclass case expects scores with shape (n_samples, n_classes). 
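Since `_ndcg_sample_scores` divides each sample's DCG by the DCG of a perfect ranking, the public functions satisfy NDCG = DCG / IDCG. A minimal sketch, assuming the `dcg_score` and `ndcg_score` defined in this file:

import numpy as np
from sklearn.metrics import dcg_score, ndcg_score

true_relevance = np.asarray([[10, 0, 0, 1, 5]])
scores = np.asarray([[0.1, 0.2, 0.3, 4, 70]])

dcg = dcg_score(true_relevance, scores)
ideal_dcg = dcg_score(true_relevance, true_relevance)    # DCG of a perfect ranking
assert np.isclose(dcg / ideal_dcg, ndcg_score(true_relevance, scores))  # ~0.69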
+ In the multiclass case, the order of the class scores must + correspond to the order of ``labels``, if provided, or else to + the numerical or lexicographical order of the labels in ``y_true``. + If ``y_true`` does not contain all the labels, ``labels`` must be + provided. + + k : int, default=2 + Number of most likely outcomes considered to find the correct label. + + normalize : bool, default=True + If `True`, return the fraction of correctly classified samples. + Otherwise, return the number of correctly classified samples. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. If `None`, all samples are given the same weight. + + labels : array-like of shape (n_classes,), default=None + Multiclass only. List of labels that index the classes in ``y_score``. + If ``None``, the numerical or lexicographical order of the labels in + ``y_true`` is used. If ``y_true`` does not contain all the labels, + ``labels`` must be provided. + + Returns + ------- + score : float + The top-k accuracy score. The best performance is 1 with + `normalize == True` and the number of samples with + `normalize == False`. + + See Also + -------- + accuracy_score : Compute the accuracy score. By default, the function will + return the fraction of correct predictions divided by the total number + of predictions. + + Notes + ----- + In cases where two or more labels are assigned equal predicted scores, + the labels with the highest indices will be chosen first. This might + impact the result if the correct label falls after the threshold because + of that. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.metrics import top_k_accuracy_score + >>> y_true = np.array([0, 1, 2, 2]) + >>> y_score = np.array([[0.5, 0.2, 0.2], # 0 is in top 2 + ... [0.3, 0.4, 0.2], # 1 is in top 2 + ... [0.2, 0.4, 0.3], # 2 is in top 2 + ... [0.7, 0.2, 0.1]]) # 2 isn't in top 2 + >>> top_k_accuracy_score(y_true, y_score, k=2) + 0.75 + >>> # Not normalizing gives the number of "correctly" classified samples + >>> top_k_accuracy_score(y_true, y_score, k=2, normalize=False) + 3 + """ + y_true = check_array(y_true, ensure_2d=False, dtype=None) + y_true = column_or_1d(y_true) + y_type = type_of_target(y_true, input_name="y_true") + if y_type == "binary" and labels is not None and len(labels) > 2: + y_type = "multiclass" + if y_type not in {"binary", "multiclass"}: + raise ValueError( + f"y type must be 'binary' or 'multiclass', got '{y_type}' instead." + ) + y_score = check_array(y_score, ensure_2d=False) + if y_type == "binary": + if y_score.ndim == 2 and y_score.shape[1] != 1: + raise ValueError( + "`y_true` is binary while y_score is 2d with" + f" {y_score.shape[1]} classes. If `y_true` does not contain all the" + " labels, `labels` must be provided." + ) + y_score = column_or_1d(y_score) + + check_consistent_length(y_true, y_score, sample_weight) + y_score_n_classes = y_score.shape[1] if y_score.ndim == 2 else 2 + + if labels is None: + classes = _unique(y_true) + n_classes = len(classes) + + if n_classes != y_score_n_classes: + raise ValueError( + f"Number of classes in 'y_true' ({n_classes}) not equal " + f"to the number of classes in 'y_score' ({y_score_n_classes})." + "You can provide a list of all known classes by assigning it " + "to the `labels` parameter." 
+ ) + else: + labels = column_or_1d(labels) + classes = _unique(labels) + n_labels = len(labels) + n_classes = len(classes) + + if n_classes != n_labels: + raise ValueError("Parameter 'labels' must be unique.") + + if not np.array_equal(classes, labels): + raise ValueError("Parameter 'labels' must be ordered.") + + if n_classes != y_score_n_classes: + raise ValueError( + f"Number of given labels ({n_classes}) not equal to the " + f"number of classes in 'y_score' ({y_score_n_classes})." + ) + + if len(np.setdiff1d(y_true, classes)): + raise ValueError("'y_true' contains labels not in parameter 'labels'.") + + if k >= n_classes: + warnings.warn( + ( + f"'k' ({k}) greater than or equal to 'n_classes' ({n_classes}) " + "will result in a perfect score and is therefore meaningless." + ), + UndefinedMetricWarning, + ) + + y_true_encoded = _encode(y_true, uniques=classes) + + if y_type == "binary": + if k == 1: + threshold = 0.5 if y_score.min() >= 0 and y_score.max() <= 1 else 0 + y_pred = (y_score > threshold).astype(np.int64) + hits = y_pred == y_true_encoded + else: + hits = np.ones_like(y_score, dtype=np.bool_) + elif y_type == "multiclass": + sorted_pred = np.argsort(y_score, axis=1, kind="mergesort")[:, ::-1] + hits = (y_true_encoded == sorted_pred[:, :k].T).any(axis=0) + + if normalize: + return np.average(hits, weights=sample_weight) + elif sample_weight is None: + return np.sum(hits) + else: + return np.dot(hits, sample_weight) diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/_regression.py b/venv/lib/python3.10/site-packages/sklearn/metrics/_regression.py new file mode 100644 index 0000000000000000000000000000000000000000..807bb782b34a7af1706d75e3822bb645e54909fc --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/metrics/_regression.py @@ -0,0 +1,1824 @@ +"""Metrics to assess performance on regression task. + +Functions named as ``*_score`` return a scalar value to maximize: the higher +the better. + +Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize: +the lower the better. +""" + +# Authors: Alexandre Gramfort +# Mathieu Blondel +# Olivier Grisel +# Arnaud Joly +# Jochen Wersdorfer +# Lars Buitinck +# Joel Nothman +# Karan Desai +# Noel Dawe +# Manoj Kumar +# Michael Eickenberg +# Konstantin Shmelkov +# Christian Lorentzen +# Ashutosh Hathidara +# Uttam kumar +# Sylvain Marie +# Ohad Michel +# Alejandro Martin Gil +# License: BSD 3 clause + +import warnings +from numbers import Real + +import numpy as np +from scipy.special import xlogy + +from ..exceptions import UndefinedMetricWarning +from ..utils._param_validation import Hidden, Interval, StrOptions, validate_params +from ..utils.stats import _weighted_percentile +from ..utils.validation import ( + _check_sample_weight, + _num_samples, + check_array, + check_consistent_length, + column_or_1d, +) + +__ALL__ = [ + "max_error", + "mean_absolute_error", + "mean_squared_error", + "mean_squared_log_error", + "median_absolute_error", + "mean_absolute_percentage_error", + "mean_pinball_loss", + "r2_score", + "root_mean_squared_log_error", + "root_mean_squared_error", + "explained_variance_score", + "mean_tweedie_deviance", + "mean_poisson_deviance", + "mean_gamma_deviance", + "d2_tweedie_score", + "d2_pinball_score", + "d2_absolute_error_score", +] + + +def _check_reg_targets(y_true, y_pred, multioutput, dtype="numeric"): + """Check that y_true and y_pred belong to the same regression task. 
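`top_k_accuracy_score` counts a sample as correct when its true class index appears among the `k` highest-scoring columns. A minimal sketch, assuming numpy and the function defined above, reproducing the 0.75 from the docstring example with a hand-rolled top-2 check:

import numpy as np
from sklearn.metrics import top_k_accuracy_score

y_true = np.array([0, 1, 2, 2])
y_score = np.array([[0.5, 0.2, 0.2],
                    [0.3, 0.4, 0.2],
                    [0.2, 0.4, 0.3],
                    [0.7, 0.2, 0.1]])

top2 = np.argsort(y_score, axis=1)[:, -2:]     # column indices of the two highest scores
hits = np.array([label in row for label, row in zip(y_true, top2)])
assert np.isclose(hits.mean(), top_k_accuracy_score(y_true, y_score, k=2))  # 0.75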
+ + Parameters + ---------- + y_true : array-like + + y_pred : array-like + + multioutput : array-like or string in ['raw_values', uniform_average', + 'variance_weighted'] or None + None is accepted due to backward compatibility of r2_score(). + + dtype : str or list, default="numeric" + the dtype argument passed to check_array. + + Returns + ------- + type_true : one of {'continuous', continuous-multioutput'} + The type of the true target data, as output by + 'utils.multiclass.type_of_target'. + + y_true : array-like of shape (n_samples, n_outputs) + Ground truth (correct) target values. + + y_pred : array-like of shape (n_samples, n_outputs) + Estimated target values. + + multioutput : array-like of shape (n_outputs) or string in ['raw_values', + uniform_average', 'variance_weighted'] or None + Custom output weights if ``multioutput`` is array-like or + just the corresponding argument if ``multioutput`` is a + correct keyword. + """ + check_consistent_length(y_true, y_pred) + y_true = check_array(y_true, ensure_2d=False, dtype=dtype) + y_pred = check_array(y_pred, ensure_2d=False, dtype=dtype) + + if y_true.ndim == 1: + y_true = y_true.reshape((-1, 1)) + + if y_pred.ndim == 1: + y_pred = y_pred.reshape((-1, 1)) + + if y_true.shape[1] != y_pred.shape[1]: + raise ValueError( + "y_true and y_pred have different number of output ({0}!={1})".format( + y_true.shape[1], y_pred.shape[1] + ) + ) + + n_outputs = y_true.shape[1] + allowed_multioutput_str = ("raw_values", "uniform_average", "variance_weighted") + if isinstance(multioutput, str): + if multioutput not in allowed_multioutput_str: + raise ValueError( + "Allowed 'multioutput' string values are {}. " + "You provided multioutput={!r}".format( + allowed_multioutput_str, multioutput + ) + ) + elif multioutput is not None: + multioutput = check_array(multioutput, ensure_2d=False) + if n_outputs == 1: + raise ValueError("Custom weights are useful only in multi-output cases.") + elif n_outputs != len(multioutput): + raise ValueError( + "There must be equally many custom weights (%d) as outputs (%d)." + % (len(multioutput), n_outputs) + ) + y_type = "continuous" if n_outputs == 1 else "continuous-multioutput" + + return y_type, y_true, y_pred, multioutput + + +@validate_params( + { + "y_true": ["array-like"], + "y_pred": ["array-like"], + "sample_weight": ["array-like", None], + "multioutput": [StrOptions({"raw_values", "uniform_average"}), "array-like"], + }, + prefer_skip_nested_validation=True, +) +def mean_absolute_error( + y_true, y_pred, *, sample_weight=None, multioutput="uniform_average" +): + """Mean absolute error regression loss. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + y_true : array-like of shape (n_samples,) or (n_samples, n_outputs) + Ground truth (correct) target values. + + y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs) + Estimated target values. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. + + multioutput : {'raw_values', 'uniform_average'} or array-like of shape \ + (n_outputs,), default='uniform_average' + Defines aggregating of multiple output values. + Array-like value defines weights used to average errors. + + 'raw_values' : + Returns a full set of errors in case of multioutput input. + + 'uniform_average' : + Errors of all outputs are averaged with uniform weight. + + Returns + ------- + loss : float or ndarray of floats + If multioutput is 'raw_values', then mean absolute error is returned + for each output separately. 
+ If multioutput is 'uniform_average' or an ndarray of weights, then the + weighted average of all output errors is returned. + + MAE output is non-negative floating point. The best value is 0.0. + + Examples + -------- + >>> from sklearn.metrics import mean_absolute_error + >>> y_true = [3, -0.5, 2, 7] + >>> y_pred = [2.5, 0.0, 2, 8] + >>> mean_absolute_error(y_true, y_pred) + 0.5 + >>> y_true = [[0.5, 1], [-1, 1], [7, -6]] + >>> y_pred = [[0, 2], [-1, 2], [8, -5]] + >>> mean_absolute_error(y_true, y_pred) + 0.75 + >>> mean_absolute_error(y_true, y_pred, multioutput='raw_values') + array([0.5, 1. ]) + >>> mean_absolute_error(y_true, y_pred, multioutput=[0.3, 0.7]) + 0.85... + """ + y_type, y_true, y_pred, multioutput = _check_reg_targets( + y_true, y_pred, multioutput + ) + check_consistent_length(y_true, y_pred, sample_weight) + output_errors = np.average(np.abs(y_pred - y_true), weights=sample_weight, axis=0) + if isinstance(multioutput, str): + if multioutput == "raw_values": + return output_errors + elif multioutput == "uniform_average": + # pass None as weights to np.average: uniform mean + multioutput = None + + return np.average(output_errors, weights=multioutput) + + +@validate_params( + { + "y_true": ["array-like"], + "y_pred": ["array-like"], + "sample_weight": ["array-like", None], + "alpha": [Interval(Real, 0, 1, closed="both")], + "multioutput": [StrOptions({"raw_values", "uniform_average"}), "array-like"], + }, + prefer_skip_nested_validation=True, +) +def mean_pinball_loss( + y_true, y_pred, *, sample_weight=None, alpha=0.5, multioutput="uniform_average" +): + """Pinball loss for quantile regression. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + y_true : array-like of shape (n_samples,) or (n_samples, n_outputs) + Ground truth (correct) target values. + + y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs) + Estimated target values. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. + + alpha : float, slope of the pinball loss, default=0.5, + This loss is equivalent to :ref:`mean_absolute_error` when `alpha=0.5`, + `alpha=0.95` is minimized by estimators of the 95th percentile. + + multioutput : {'raw_values', 'uniform_average'} or array-like of shape \ + (n_outputs,), default='uniform_average' + Defines aggregating of multiple output values. + Array-like value defines weights used to average errors. + + 'raw_values' : + Returns a full set of errors in case of multioutput input. + + 'uniform_average' : + Errors of all outputs are averaged with uniform weight. + + Returns + ------- + loss : float or ndarray of floats + If multioutput is 'raw_values', then mean absolute error is returned + for each output separately. + If multioutput is 'uniform_average' or an ndarray of weights, then the + weighted average of all output errors is returned. + + The pinball loss output is a non-negative floating point. The best + value is 0.0. + + Examples + -------- + >>> from sklearn.metrics import mean_pinball_loss + >>> y_true = [1, 2, 3] + >>> mean_pinball_loss(y_true, [0, 2, 3], alpha=0.1) + 0.03... + >>> mean_pinball_loss(y_true, [1, 2, 4], alpha=0.1) + 0.3... + >>> mean_pinball_loss(y_true, [0, 2, 3], alpha=0.9) + 0.3... + >>> mean_pinball_loss(y_true, [1, 2, 4], alpha=0.9) + 0.03... 
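The `multioutput` aggregation used throughout this module first computes one error per output and then averages them with the requested weights. A minimal sketch, assuming only numpy and the `mean_absolute_error` defined above, using the values from its docstring example:

import numpy as np
from sklearn.metrics import mean_absolute_error

y_true = [[0.5, 1], [-1, 1], [7, -6]]
y_pred = [[0, 2], [-1, 2], [8, -5]]

per_output = mean_absolute_error(y_true, y_pred, multioutput='raw_values')  # array([0.5, 1.])
weighted = mean_absolute_error(y_true, y_pred, multioutput=[0.3, 0.7])
assert np.isclose(weighted, np.average(per_output, weights=[0.3, 0.7]))     # 0.85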
+ >>> mean_pinball_loss(y_true, y_true, alpha=0.1) + 0.0 + >>> mean_pinball_loss(y_true, y_true, alpha=0.9) + 0.0 + """ + y_type, y_true, y_pred, multioutput = _check_reg_targets( + y_true, y_pred, multioutput + ) + check_consistent_length(y_true, y_pred, sample_weight) + diff = y_true - y_pred + sign = (diff >= 0).astype(diff.dtype) + loss = alpha * sign * diff - (1 - alpha) * (1 - sign) * diff + output_errors = np.average(loss, weights=sample_weight, axis=0) + + if isinstance(multioutput, str) and multioutput == "raw_values": + return output_errors + + if isinstance(multioutput, str) and multioutput == "uniform_average": + # pass None as weights to np.average: uniform mean + multioutput = None + + return np.average(output_errors, weights=multioutput) + + +@validate_params( + { + "y_true": ["array-like"], + "y_pred": ["array-like"], + "sample_weight": ["array-like", None], + "multioutput": [StrOptions({"raw_values", "uniform_average"}), "array-like"], + }, + prefer_skip_nested_validation=True, +) +def mean_absolute_percentage_error( + y_true, y_pred, *, sample_weight=None, multioutput="uniform_average" +): + """Mean absolute percentage error (MAPE) regression loss. + + Note here that the output is not a percentage in the range [0, 100] + and a value of 100 does not mean 100% but 1e2. Furthermore, the output + can be arbitrarily high when `y_true` is small (which is specific to the + metric) or when `abs(y_true - y_pred)` is large (which is common for most + regression metrics). Read more in the + :ref:`User Guide `. + + .. versionadded:: 0.24 + + Parameters + ---------- + y_true : array-like of shape (n_samples,) or (n_samples, n_outputs) + Ground truth (correct) target values. + + y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs) + Estimated target values. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. + + multioutput : {'raw_values', 'uniform_average'} or array-like + Defines aggregating of multiple output values. + Array-like value defines weights used to average errors. + If input is list then the shape must be (n_outputs,). + + 'raw_values' : + Returns a full set of errors in case of multioutput input. + + 'uniform_average' : + Errors of all outputs are averaged with uniform weight. + + Returns + ------- + loss : float or ndarray of floats + If multioutput is 'raw_values', then mean absolute percentage error + is returned for each output separately. + If multioutput is 'uniform_average' or an ndarray of weights, then the + weighted average of all output errors is returned. + + MAPE output is non-negative floating point. The best value is 0.0. + But note that bad predictions can lead to arbitrarily large + MAPE values, especially if some `y_true` values are very close to zero. + Note that we return a large value instead of `inf` when `y_true` is zero. + + Examples + -------- + >>> from sklearn.metrics import mean_absolute_percentage_error + >>> y_true = [3, -0.5, 2, 7] + >>> y_pred = [2.5, 0.0, 2, 8] + >>> mean_absolute_percentage_error(y_true, y_pred) + 0.3273... + >>> y_true = [[0.5, 1], [-1, 1], [7, -6]] + >>> y_pred = [[0, 2], [-1, 2], [8, -5]] + >>> mean_absolute_percentage_error(y_true, y_pred) + 0.5515... + >>> mean_absolute_percentage_error(y_true, y_pred, multioutput=[0.3, 0.7]) + 0.6198... + >>> # the value when some element of the y_true is zero is arbitrarily high because + >>> # of the division by epsilon + >>> y_true = [1., 0., 2.4, 7.] + >>> y_pred = [1.2, 0.1, 2.4, 8.] 
+ >>> mean_absolute_percentage_error(y_true, y_pred) + 112589990684262.48 + """ + y_type, y_true, y_pred, multioutput = _check_reg_targets( + y_true, y_pred, multioutput + ) + check_consistent_length(y_true, y_pred, sample_weight) + epsilon = np.finfo(np.float64).eps + mape = np.abs(y_pred - y_true) / np.maximum(np.abs(y_true), epsilon) + output_errors = np.average(mape, weights=sample_weight, axis=0) + if isinstance(multioutput, str): + if multioutput == "raw_values": + return output_errors + elif multioutput == "uniform_average": + # pass None as weights to np.average: uniform mean + multioutput = None + + return np.average(output_errors, weights=multioutput) + + +@validate_params( + { + "y_true": ["array-like"], + "y_pred": ["array-like"], + "sample_weight": ["array-like", None], + "multioutput": [StrOptions({"raw_values", "uniform_average"}), "array-like"], + "squared": [Hidden(StrOptions({"deprecated"})), "boolean"], + }, + prefer_skip_nested_validation=True, +) +def mean_squared_error( + y_true, + y_pred, + *, + sample_weight=None, + multioutput="uniform_average", + squared="deprecated", +): + """Mean squared error regression loss. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + y_true : array-like of shape (n_samples,) or (n_samples, n_outputs) + Ground truth (correct) target values. + + y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs) + Estimated target values. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. + + multioutput : {'raw_values', 'uniform_average'} or array-like of shape \ + (n_outputs,), default='uniform_average' + Defines aggregating of multiple output values. + Array-like value defines weights used to average errors. + + 'raw_values' : + Returns a full set of errors in case of multioutput input. + + 'uniform_average' : + Errors of all outputs are averaged with uniform weight. + + squared : bool, default=True + If True returns MSE value, if False returns RMSE value. + + .. deprecated:: 1.4 + `squared` is deprecated in 1.4 and will be removed in 1.6. + Use :func:`~sklearn.metrics.root_mean_squared_error` + instead to calculate the root mean squared error. + + Returns + ------- + loss : float or ndarray of floats + A non-negative floating point value (the best value is 0.0), or an + array of floating point values, one for each individual target. + + Examples + -------- + >>> from sklearn.metrics import mean_squared_error + >>> y_true = [3, -0.5, 2, 7] + >>> y_pred = [2.5, 0.0, 2, 8] + >>> mean_squared_error(y_true, y_pred) + 0.375 + >>> y_true = [[0.5, 1],[-1, 1],[7, -6]] + >>> y_pred = [[0, 2],[-1, 2],[8, -5]] + >>> mean_squared_error(y_true, y_pred) + 0.708... + >>> mean_squared_error(y_true, y_pred, multioutput='raw_values') + array([0.41666667, 1. ]) + >>> mean_squared_error(y_true, y_pred, multioutput=[0.3, 0.7]) + 0.825... + """ + # TODO(1.6): remove + if squared != "deprecated": + warnings.warn( + ( + "'squared' is deprecated in version 1.4 and " + "will be removed in 1.6. To calculate the " + "root mean squared error, use the function" + "'root_mean_squared_error'." 
+ ), + FutureWarning, + ) + if not squared: + return root_mean_squared_error( + y_true, y_pred, sample_weight=sample_weight, multioutput=multioutput + ) + + y_type, y_true, y_pred, multioutput = _check_reg_targets( + y_true, y_pred, multioutput + ) + check_consistent_length(y_true, y_pred, sample_weight) + output_errors = np.average((y_true - y_pred) ** 2, axis=0, weights=sample_weight) + + if isinstance(multioutput, str): + if multioutput == "raw_values": + return output_errors + elif multioutput == "uniform_average": + # pass None as weights to np.average: uniform mean + multioutput = None + + return np.average(output_errors, weights=multioutput) + + +@validate_params( + { + "y_true": ["array-like"], + "y_pred": ["array-like"], + "sample_weight": ["array-like", None], + "multioutput": [StrOptions({"raw_values", "uniform_average"}), "array-like"], + }, + prefer_skip_nested_validation=True, +) +def root_mean_squared_error( + y_true, y_pred, *, sample_weight=None, multioutput="uniform_average" +): + """Root mean squared error regression loss. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 1.4 + + Parameters + ---------- + y_true : array-like of shape (n_samples,) or (n_samples, n_outputs) + Ground truth (correct) target values. + + y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs) + Estimated target values. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. + + multioutput : {'raw_values', 'uniform_average'} or array-like of shape \ + (n_outputs,), default='uniform_average' + Defines aggregating of multiple output values. + Array-like value defines weights used to average errors. + + 'raw_values' : + Returns a full set of errors in case of multioutput input. + + 'uniform_average' : + Errors of all outputs are averaged with uniform weight. + + Returns + ------- + loss : float or ndarray of floats + A non-negative floating point value (the best value is 0.0), or an + array of floating point values, one for each individual target. + + Examples + -------- + >>> from sklearn.metrics import root_mean_squared_error + >>> y_true = [3, -0.5, 2, 7] + >>> y_pred = [2.5, 0.0, 2, 8] + >>> root_mean_squared_error(y_true, y_pred) + 0.612... + >>> y_true = [[0.5, 1],[-1, 1],[7, -6]] + >>> y_pred = [[0, 2],[-1, 2],[8, -5]] + >>> root_mean_squared_error(y_true, y_pred) + 0.822... + """ + output_errors = np.sqrt( + mean_squared_error( + y_true, y_pred, sample_weight=sample_weight, multioutput="raw_values" + ) + ) + + if isinstance(multioutput, str): + if multioutput == "raw_values": + return output_errors + elif multioutput == "uniform_average": + # pass None as weights to np.average: uniform mean + multioutput = None + + return np.average(output_errors, weights=multioutput) + + +@validate_params( + { + "y_true": ["array-like"], + "y_pred": ["array-like"], + "sample_weight": ["array-like", None], + "multioutput": [StrOptions({"raw_values", "uniform_average"}), "array-like"], + "squared": [Hidden(StrOptions({"deprecated"})), "boolean"], + }, + prefer_skip_nested_validation=True, +) +def mean_squared_log_error( + y_true, + y_pred, + *, + sample_weight=None, + multioutput="uniform_average", + squared="deprecated", +): + """Mean squared logarithmic error regression loss. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + y_true : array-like of shape (n_samples,) or (n_samples, n_outputs) + Ground truth (correct) target values. + + y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs) + Estimated target values. 
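As the deprecation note above indicates, `root_mean_squared_error` is simply the square root of the per-output MSE. A minimal sketch assuming the two public functions defined in this file:

import numpy as np
from sklearn.metrics import mean_squared_error, root_mean_squared_error

y_true = [3, -0.5, 2, 7]
y_pred = [2.5, 0.0, 2, 8]

# root_mean_squared_error (new in 1.4) replaces mean_squared_error(..., squared=False)
assert np.isclose(root_mean_squared_error(y_true, y_pred),
                  np.sqrt(mean_squared_error(y_true, y_pred)))   # ~0.612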
+ + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. + + multioutput : {'raw_values', 'uniform_average'} or array-like of shape \ + (n_outputs,), default='uniform_average' + + Defines aggregating of multiple output values. + Array-like value defines weights used to average errors. + + 'raw_values' : + Returns a full set of errors when the input is of multioutput + format. + + 'uniform_average' : + Errors of all outputs are averaged with uniform weight. + + squared : bool, default=True + If True returns MSLE (mean squared log error) value. + If False returns RMSLE (root mean squared log error) value. + + .. deprecated:: 1.4 + `squared` is deprecated in 1.4 and will be removed in 1.6. + Use :func:`~sklearn.metrics.root_mean_squared_log_error` + instead to calculate the root mean squared logarithmic error. + + Returns + ------- + loss : float or ndarray of floats + A non-negative floating point value (the best value is 0.0), or an + array of floating point values, one for each individual target. + + Examples + -------- + >>> from sklearn.metrics import mean_squared_log_error + >>> y_true = [3, 5, 2.5, 7] + >>> y_pred = [2.5, 5, 4, 8] + >>> mean_squared_log_error(y_true, y_pred) + 0.039... + >>> y_true = [[0.5, 1], [1, 2], [7, 6]] + >>> y_pred = [[0.5, 2], [1, 2.5], [8, 8]] + >>> mean_squared_log_error(y_true, y_pred) + 0.044... + >>> mean_squared_log_error(y_true, y_pred, multioutput='raw_values') + array([0.00462428, 0.08377444]) + >>> mean_squared_log_error(y_true, y_pred, multioutput=[0.3, 0.7]) + 0.060... + """ + # TODO(1.6): remove + if squared != "deprecated": + warnings.warn( + ( + "'squared' is deprecated in version 1.4 and " + "will be removed in 1.6. To calculate the " + "root mean squared logarithmic error, use the function" + "'root_mean_squared_log_error'." + ), + FutureWarning, + ) + if not squared: + return root_mean_squared_log_error( + y_true, y_pred, sample_weight=sample_weight, multioutput=multioutput + ) + + y_type, y_true, y_pred, multioutput = _check_reg_targets( + y_true, y_pred, multioutput + ) + check_consistent_length(y_true, y_pred, sample_weight) + + if (y_true < 0).any() or (y_pred < 0).any(): + raise ValueError( + "Mean Squared Logarithmic Error cannot be used when " + "targets contain negative values." + ) + + return mean_squared_error( + np.log1p(y_true), + np.log1p(y_pred), + sample_weight=sample_weight, + multioutput=multioutput, + ) + + +@validate_params( + { + "y_true": ["array-like"], + "y_pred": ["array-like"], + "sample_weight": ["array-like", None], + "multioutput": [StrOptions({"raw_values", "uniform_average"}), "array-like"], + }, + prefer_skip_nested_validation=True, +) +def root_mean_squared_log_error( + y_true, y_pred, *, sample_weight=None, multioutput="uniform_average" +): + """Root mean squared logarithmic error regression loss. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 1.4 + + Parameters + ---------- + y_true : array-like of shape (n_samples,) or (n_samples, n_outputs) + Ground truth (correct) target values. + + y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs) + Estimated target values. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. + + multioutput : {'raw_values', 'uniform_average'} or array-like of shape \ + (n_outputs,), default='uniform_average' + + Defines aggregating of multiple output values. + Array-like value defines weights used to average errors. 
+ + 'raw_values' : + Returns a full set of errors when the input is of multioutput + format. + + 'uniform_average' : + Errors of all outputs are averaged with uniform weight. + + Returns + ------- + loss : float or ndarray of floats + A non-negative floating point value (the best value is 0.0), or an + array of floating point values, one for each individual target. + + Examples + -------- + >>> from sklearn.metrics import root_mean_squared_log_error + >>> y_true = [3, 5, 2.5, 7] + >>> y_pred = [2.5, 5, 4, 8] + >>> root_mean_squared_log_error(y_true, y_pred) + 0.199... + """ + _, y_true, y_pred, multioutput = _check_reg_targets(y_true, y_pred, multioutput) + check_consistent_length(y_true, y_pred, sample_weight) + + if (y_true < 0).any() or (y_pred < 0).any(): + raise ValueError( + "Root Mean Squared Logarithmic Error cannot be used when " + "targets contain negative values." + ) + + return root_mean_squared_error( + np.log1p(y_true), + np.log1p(y_pred), + sample_weight=sample_weight, + multioutput=multioutput, + ) + + +@validate_params( + { + "y_true": ["array-like"], + "y_pred": ["array-like"], + "multioutput": [StrOptions({"raw_values", "uniform_average"}), "array-like"], + "sample_weight": ["array-like", None], + }, + prefer_skip_nested_validation=True, +) +def median_absolute_error( + y_true, y_pred, *, multioutput="uniform_average", sample_weight=None +): + """Median absolute error regression loss. + + Median absolute error output is non-negative floating point. The best value + is 0.0. Read more in the :ref:`User Guide `. + + Parameters + ---------- + y_true : array-like of shape (n_samples,) or (n_samples, n_outputs) + Ground truth (correct) target values. + + y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs) + Estimated target values. + + multioutput : {'raw_values', 'uniform_average'} or array-like of shape \ + (n_outputs,), default='uniform_average' + Defines aggregating of multiple output values. Array-like value defines + weights used to average errors. + + 'raw_values' : + Returns a full set of errors in case of multioutput input. + + 'uniform_average' : + Errors of all outputs are averaged with uniform weight. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. + + .. versionadded:: 0.24 + + Returns + ------- + loss : float or ndarray of floats + If multioutput is 'raw_values', then mean absolute error is returned + for each output separately. + If multioutput is 'uniform_average' or an ndarray of weights, then the + weighted average of all output errors is returned. + + Examples + -------- + >>> from sklearn.metrics import median_absolute_error + >>> y_true = [3, -0.5, 2, 7] + >>> y_pred = [2.5, 0.0, 2, 8] + >>> median_absolute_error(y_true, y_pred) + 0.5 + >>> y_true = [[0.5, 1], [-1, 1], [7, -6]] + >>> y_pred = [[0, 2], [-1, 2], [8, -5]] + >>> median_absolute_error(y_true, y_pred) + 0.75 + >>> median_absolute_error(y_true, y_pred, multioutput='raw_values') + array([0.5, 1. 
]) + >>> median_absolute_error(y_true, y_pred, multioutput=[0.3, 0.7]) + 0.85 + """ + y_type, y_true, y_pred, multioutput = _check_reg_targets( + y_true, y_pred, multioutput + ) + if sample_weight is None: + output_errors = np.median(np.abs(y_pred - y_true), axis=0) + else: + sample_weight = _check_sample_weight(sample_weight, y_pred) + output_errors = _weighted_percentile( + np.abs(y_pred - y_true), sample_weight=sample_weight + ) + if isinstance(multioutput, str): + if multioutput == "raw_values": + return output_errors + elif multioutput == "uniform_average": + # pass None as weights to np.average: uniform mean + multioutput = None + + return np.average(output_errors, weights=multioutput) + + +def _assemble_r2_explained_variance( + numerator, denominator, n_outputs, multioutput, force_finite +): + """Common part used by explained variance score and :math:`R^2` score.""" + + nonzero_denominator = denominator != 0 + + if not force_finite: + # Standard formula, that may lead to NaN or -Inf + output_scores = 1 - (numerator / denominator) + else: + nonzero_numerator = numerator != 0 + # Default = Zero Numerator = perfect predictions. Set to 1.0 + # (note: even if denominator is zero, thus avoiding NaN scores) + output_scores = np.ones([n_outputs]) + # Non-zero Numerator and Non-zero Denominator: use the formula + valid_score = nonzero_denominator & nonzero_numerator + output_scores[valid_score] = 1 - ( + numerator[valid_score] / denominator[valid_score] + ) + # Non-zero Numerator and Zero Denominator: + # arbitrary set to 0.0 to avoid -inf scores + output_scores[nonzero_numerator & ~nonzero_denominator] = 0.0 + + if isinstance(multioutput, str): + if multioutput == "raw_values": + # return scores individually + return output_scores + elif multioutput == "uniform_average": + # Passing None as weights to np.average results is uniform mean + avg_weights = None + elif multioutput == "variance_weighted": + avg_weights = denominator + if not np.any(nonzero_denominator): + # All weights are zero, np.average would raise a ZeroDiv error. + # This only happens when all y are constant (or 1-element long) + # Since weights are all equal, fall back to uniform weights. + avg_weights = None + else: + avg_weights = multioutput + + return np.average(output_scores, weights=avg_weights) + + +@validate_params( + { + "y_true": ["array-like"], + "y_pred": ["array-like"], + "sample_weight": ["array-like", None], + "multioutput": [ + StrOptions({"raw_values", "uniform_average", "variance_weighted"}), + "array-like", + ], + "force_finite": ["boolean"], + }, + prefer_skip_nested_validation=True, +) +def explained_variance_score( + y_true, + y_pred, + *, + sample_weight=None, + multioutput="uniform_average", + force_finite=True, +): + """Explained variance regression score function. + + Best possible score is 1.0, lower values are worse. + + In the particular case when ``y_true`` is constant, the explained variance + score is not finite: it is either ``NaN`` (perfect predictions) or + ``-Inf`` (imperfect predictions). To prevent such non-finite numbers to + pollute higher-level experiments such as a grid search cross-validation, + by default these cases are replaced with 1.0 (perfect predictions) or 0.0 + (imperfect predictions) respectively. If ``force_finite`` + is set to ``False``, this score falls back on the original :math:`R^2` + definition. + + .. 
note:: + The Explained Variance score is similar to the + :func:`R^2 score `, with the notable difference that it + does not account for systematic offsets in the prediction. Most often + the :func:`R^2 score ` should be preferred. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + y_true : array-like of shape (n_samples,) or (n_samples, n_outputs) + Ground truth (correct) target values. + + y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs) + Estimated target values. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. + + multioutput : {'raw_values', 'uniform_average', 'variance_weighted'} or \ + array-like of shape (n_outputs,), default='uniform_average' + Defines aggregating of multiple output scores. + Array-like value defines weights used to average scores. + + 'raw_values' : + Returns a full set of scores in case of multioutput input. + + 'uniform_average' : + Scores of all outputs are averaged with uniform weight. + + 'variance_weighted' : + Scores of all outputs are averaged, weighted by the variances + of each individual output. + + force_finite : bool, default=True + Flag indicating if ``NaN`` and ``-Inf`` scores resulting from constant + data should be replaced with real numbers (``1.0`` if prediction is + perfect, ``0.0`` otherwise). Default is ``True``, a convenient setting + for hyperparameters' search procedures (e.g. grid search + cross-validation). + + .. versionadded:: 1.1 + + Returns + ------- + score : float or ndarray of floats + The explained variance or ndarray if 'multioutput' is 'raw_values'. + + See Also + -------- + r2_score : + Similar metric, but accounting for systematic offsets in + prediction. + + Notes + ----- + This is not a symmetric function. + + Examples + -------- + >>> from sklearn.metrics import explained_variance_score + >>> y_true = [3, -0.5, 2, 7] + >>> y_pred = [2.5, 0.0, 2, 8] + >>> explained_variance_score(y_true, y_pred) + 0.957... + >>> y_true = [[0.5, 1], [-1, 1], [7, -6]] + >>> y_pred = [[0, 2], [-1, 2], [8, -5]] + >>> explained_variance_score(y_true, y_pred, multioutput='uniform_average') + 0.983... 
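The explained variance computed below is one minus the ratio of the residual variance to the target variance. A minimal sketch, assuming numpy and the `explained_variance_score` defined in this file, reproducing the 0.957 from the docstring example:

import numpy as np
from sklearn.metrics import explained_variance_score

y_true = np.array([3, -0.5, 2, 7])
y_pred = np.array([2.5, 0.0, 2, 8])

residuals = y_true - y_pred
manual = 1 - residuals.var() / y_true.var()    # biased (ddof=0) variances, as in the code below
assert np.isclose(manual, explained_variance_score(y_true, y_pred))   # ~0.957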
+ >>> y_true = [-2, -2, -2] + >>> y_pred = [-2, -2, -2] + >>> explained_variance_score(y_true, y_pred) + 1.0 + >>> explained_variance_score(y_true, y_pred, force_finite=False) + nan + >>> y_true = [-2, -2, -2] + >>> y_pred = [-2, -2, -2 + 1e-8] + >>> explained_variance_score(y_true, y_pred) + 0.0 + >>> explained_variance_score(y_true, y_pred, force_finite=False) + -inf + """ + y_type, y_true, y_pred, multioutput = _check_reg_targets( + y_true, y_pred, multioutput + ) + check_consistent_length(y_true, y_pred, sample_weight) + + y_diff_avg = np.average(y_true - y_pred, weights=sample_weight, axis=0) + numerator = np.average( + (y_true - y_pred - y_diff_avg) ** 2, weights=sample_weight, axis=0 + ) + + y_true_avg = np.average(y_true, weights=sample_weight, axis=0) + denominator = np.average((y_true - y_true_avg) ** 2, weights=sample_weight, axis=0) + + return _assemble_r2_explained_variance( + numerator=numerator, + denominator=denominator, + n_outputs=y_true.shape[1], + multioutput=multioutput, + force_finite=force_finite, + ) + + +@validate_params( + { + "y_true": ["array-like"], + "y_pred": ["array-like"], + "sample_weight": ["array-like", None], + "multioutput": [ + StrOptions({"raw_values", "uniform_average", "variance_weighted"}), + "array-like", + None, + ], + "force_finite": ["boolean"], + }, + prefer_skip_nested_validation=True, +) +def r2_score( + y_true, + y_pred, + *, + sample_weight=None, + multioutput="uniform_average", + force_finite=True, +): + """:math:`R^2` (coefficient of determination) regression score function. + + Best possible score is 1.0 and it can be negative (because the + model can be arbitrarily worse). In the general case when the true y is + non-constant, a constant model that always predicts the average y + disregarding the input features would get a :math:`R^2` score of 0.0. + + In the particular case when ``y_true`` is constant, the :math:`R^2` score + is not finite: it is either ``NaN`` (perfect predictions) or ``-Inf`` + (imperfect predictions). To prevent such non-finite numbers to pollute + higher-level experiments such as a grid search cross-validation, by default + these cases are replaced with 1.0 (perfect predictions) or 0.0 (imperfect + predictions) respectively. You can set ``force_finite`` to ``False`` to + prevent this fix from happening. + + Note: when the prediction residuals have zero mean, the :math:`R^2` score + is identical to the + :func:`Explained Variance score `. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + y_true : array-like of shape (n_samples,) or (n_samples, n_outputs) + Ground truth (correct) target values. + + y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs) + Estimated target values. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. + + multioutput : {'raw_values', 'uniform_average', 'variance_weighted'}, \ + array-like of shape (n_outputs,) or None, default='uniform_average' + + Defines aggregating of multiple output scores. + Array-like value defines weights used to average scores. + Default is "uniform_average". + + 'raw_values' : + Returns a full set of scores in case of multioutput input. + + 'uniform_average' : + Scores of all outputs are averaged with uniform weight. + + 'variance_weighted' : + Scores of all outputs are averaged, weighted by the variances + of each individual output. + + .. versionchanged:: 0.19 + Default value of multioutput is 'uniform_average'. 
+ + force_finite : bool, default=True + Flag indicating if ``NaN`` and ``-Inf`` scores resulting from constant + data should be replaced with real numbers (``1.0`` if prediction is + perfect, ``0.0`` otherwise). Default is ``True``, a convenient setting + for hyperparameters' search procedures (e.g. grid search + cross-validation). + + .. versionadded:: 1.1 + + Returns + ------- + z : float or ndarray of floats + The :math:`R^2` score or ndarray of scores if 'multioutput' is + 'raw_values'. + + Notes + ----- + This is not a symmetric function. + + Unlike most other scores, :math:`R^2` score may be negative (it need not + actually be the square of a quantity R). + + This metric is not well-defined for single samples and will return a NaN + value if n_samples is less than two. + + References + ---------- + .. [1] `Wikipedia entry on the Coefficient of determination + `_ + + Examples + -------- + >>> from sklearn.metrics import r2_score + >>> y_true = [3, -0.5, 2, 7] + >>> y_pred = [2.5, 0.0, 2, 8] + >>> r2_score(y_true, y_pred) + 0.948... + >>> y_true = [[0.5, 1], [-1, 1], [7, -6]] + >>> y_pred = [[0, 2], [-1, 2], [8, -5]] + >>> r2_score(y_true, y_pred, + ... multioutput='variance_weighted') + 0.938... + >>> y_true = [1, 2, 3] + >>> y_pred = [1, 2, 3] + >>> r2_score(y_true, y_pred) + 1.0 + >>> y_true = [1, 2, 3] + >>> y_pred = [2, 2, 2] + >>> r2_score(y_true, y_pred) + 0.0 + >>> y_true = [1, 2, 3] + >>> y_pred = [3, 2, 1] + >>> r2_score(y_true, y_pred) + -3.0 + >>> y_true = [-2, -2, -2] + >>> y_pred = [-2, -2, -2] + >>> r2_score(y_true, y_pred) + 1.0 + >>> r2_score(y_true, y_pred, force_finite=False) + nan + >>> y_true = [-2, -2, -2] + >>> y_pred = [-2, -2, -2 + 1e-8] + >>> r2_score(y_true, y_pred) + 0.0 + >>> r2_score(y_true, y_pred, force_finite=False) + -inf + """ + y_type, y_true, y_pred, multioutput = _check_reg_targets( + y_true, y_pred, multioutput + ) + check_consistent_length(y_true, y_pred, sample_weight) + + if _num_samples(y_pred) < 2: + msg = "R^2 score is not well-defined with less than two samples." + warnings.warn(msg, UndefinedMetricWarning) + return float("nan") + + if sample_weight is not None: + sample_weight = column_or_1d(sample_weight) + weight = sample_weight[:, np.newaxis] + else: + weight = 1.0 + + numerator = (weight * (y_true - y_pred) ** 2).sum(axis=0, dtype=np.float64) + denominator = ( + weight * (y_true - np.average(y_true, axis=0, weights=sample_weight)) ** 2 + ).sum(axis=0, dtype=np.float64) + + return _assemble_r2_explained_variance( + numerator=numerator, + denominator=denominator, + n_outputs=y_true.shape[1], + multioutput=multioutput, + force_finite=force_finite, + ) + + +@validate_params( + { + "y_true": ["array-like"], + "y_pred": ["array-like"], + }, + prefer_skip_nested_validation=True, +) +def max_error(y_true, y_pred): + """ + The max_error metric calculates the maximum residual error. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + y_true : array-like of shape (n_samples,) + Ground truth (correct) target values. + + y_pred : array-like of shape (n_samples,) + Estimated target values. + + Returns + ------- + max_error : float + A positive floating point value (the best value is 0.0). 
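The R² computation above reduces, in the unweighted single-output case, to one minus the ratio of the residual sum of squares to the total sum of squares. A minimal sketch assuming numpy and the `r2_score` defined above:

import numpy as np
from sklearn.metrics import r2_score

y_true = np.array([3, -0.5, 2, 7])
y_pred = np.array([2.5, 0.0, 2, 8])

ss_res = ((y_true - y_pred) ** 2).sum()
ss_tot = ((y_true - y_true.mean()) ** 2).sum()
assert np.isclose(1 - ss_res / ss_tot, r2_score(y_true, y_pred))   # ~0.948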
+ + Examples + -------- + >>> from sklearn.metrics import max_error + >>> y_true = [3, 2, 7, 1] + >>> y_pred = [4, 2, 7, 1] + >>> max_error(y_true, y_pred) + 1 + """ + y_type, y_true, y_pred, _ = _check_reg_targets(y_true, y_pred, None) + if y_type == "continuous-multioutput": + raise ValueError("Multioutput not supported in max_error") + return np.max(np.abs(y_true - y_pred)) + + +def _mean_tweedie_deviance(y_true, y_pred, sample_weight, power): + """Mean Tweedie deviance regression loss.""" + p = power + if p < 0: + # 'Extreme stable', y any real number, y_pred > 0 + dev = 2 * ( + np.power(np.maximum(y_true, 0), 2 - p) / ((1 - p) * (2 - p)) + - y_true * np.power(y_pred, 1 - p) / (1 - p) + + np.power(y_pred, 2 - p) / (2 - p) + ) + elif p == 0: + # Normal distribution, y and y_pred any real number + dev = (y_true - y_pred) ** 2 + elif p == 1: + # Poisson distribution + dev = 2 * (xlogy(y_true, y_true / y_pred) - y_true + y_pred) + elif p == 2: + # Gamma distribution + dev = 2 * (np.log(y_pred / y_true) + y_true / y_pred - 1) + else: + dev = 2 * ( + np.power(y_true, 2 - p) / ((1 - p) * (2 - p)) + - y_true * np.power(y_pred, 1 - p) / (1 - p) + + np.power(y_pred, 2 - p) / (2 - p) + ) + + return np.average(dev, weights=sample_weight) + + +@validate_params( + { + "y_true": ["array-like"], + "y_pred": ["array-like"], + "sample_weight": ["array-like", None], + "power": [ + Interval(Real, None, 0, closed="right"), + Interval(Real, 1, None, closed="left"), + ], + }, + prefer_skip_nested_validation=True, +) +def mean_tweedie_deviance(y_true, y_pred, *, sample_weight=None, power=0): + """Mean Tweedie deviance regression loss. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + y_true : array-like of shape (n_samples,) + Ground truth (correct) target values. + + y_pred : array-like of shape (n_samples,) + Estimated target values. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. + + power : float, default=0 + Tweedie power parameter. Either power <= 0 or power >= 1. + + The higher `p` the less weight is given to extreme + deviations between true and predicted targets. + + - power < 0: Extreme stable distribution. Requires: y_pred > 0. + - power = 0 : Normal distribution, output corresponds to + mean_squared_error. y_true and y_pred can be any real numbers. + - power = 1 : Poisson distribution. Requires: y_true >= 0 and + y_pred > 0. + - 1 < p < 2 : Compound Poisson distribution. Requires: y_true >= 0 + and y_pred > 0. + - power = 2 : Gamma distribution. Requires: y_true > 0 and y_pred > 0. + - power = 3 : Inverse Gaussian distribution. Requires: y_true > 0 + and y_pred > 0. + - otherwise : Positive stable distribution. Requires: y_true > 0 + and y_pred > 0. + + Returns + ------- + loss : float + A non-negative floating point value (the best value is 0.0). + + Examples + -------- + >>> from sklearn.metrics import mean_tweedie_deviance + >>> y_true = [2, 0, 1, 4] + >>> y_pred = [0.5, 0.5, 2., 2.] + >>> mean_tweedie_deviance(y_true, y_pred, power=1) + 1.4260... 
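The `power` parameter selects the deviance family: `power=0` recovers the squared error and `power=1` the Poisson deviance implemented with `xlogy` above. A minimal sketch, assuming scipy, numpy and the public functions defined in this file:

import numpy as np
from scipy.special import xlogy
from sklearn.metrics import mean_squared_error, mean_tweedie_deviance

y_true = np.array([2.0, 0.0, 1.0, 4.0])
y_pred = np.array([0.5, 0.5, 2.0, 2.0])

# power=0: normal deviance, identical to the mean squared error
assert np.isclose(mean_tweedie_deviance(y_true, y_pred, power=0),
                  mean_squared_error(y_true, y_pred))

# power=1: Poisson deviance, 2 * (y*log(y/mu) - y + mu), with xlogy(0, .) == 0
poisson_dev = 2 * (xlogy(y_true, y_true / y_pred) - y_true + y_pred)
assert np.isclose(poisson_dev.mean(), mean_tweedie_deviance(y_true, y_pred, power=1))  # ~1.4260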
+ """ + y_type, y_true, y_pred, _ = _check_reg_targets( + y_true, y_pred, None, dtype=[np.float64, np.float32] + ) + if y_type == "continuous-multioutput": + raise ValueError("Multioutput not supported in mean_tweedie_deviance") + check_consistent_length(y_true, y_pred, sample_weight) + + if sample_weight is not None: + sample_weight = column_or_1d(sample_weight) + sample_weight = sample_weight[:, np.newaxis] + + message = f"Mean Tweedie deviance error with power={power} can only be used on " + if power < 0: + # 'Extreme stable', y any real number, y_pred > 0 + if (y_pred <= 0).any(): + raise ValueError(message + "strictly positive y_pred.") + elif power == 0: + # Normal, y and y_pred can be any real number + pass + elif 1 <= power < 2: + # Poisson and compound Poisson distribution, y >= 0, y_pred > 0 + if (y_true < 0).any() or (y_pred <= 0).any(): + raise ValueError(message + "non-negative y and strictly positive y_pred.") + elif power >= 2: + # Gamma and Extreme stable distribution, y and y_pred > 0 + if (y_true <= 0).any() or (y_pred <= 0).any(): + raise ValueError(message + "strictly positive y and y_pred.") + else: # pragma: nocover + # Unreachable statement + raise ValueError + + return _mean_tweedie_deviance( + y_true, y_pred, sample_weight=sample_weight, power=power + ) + + +@validate_params( + { + "y_true": ["array-like"], + "y_pred": ["array-like"], + "sample_weight": ["array-like", None], + }, + prefer_skip_nested_validation=True, +) +def mean_poisson_deviance(y_true, y_pred, *, sample_weight=None): + """Mean Poisson deviance regression loss. + + Poisson deviance is equivalent to the Tweedie deviance with + the power parameter `power=1`. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + y_true : array-like of shape (n_samples,) + Ground truth (correct) target values. Requires y_true >= 0. + + y_pred : array-like of shape (n_samples,) + Estimated target values. Requires y_pred > 0. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. + + Returns + ------- + loss : float + A non-negative floating point value (the best value is 0.0). + + Examples + -------- + >>> from sklearn.metrics import mean_poisson_deviance + >>> y_true = [2, 0, 1, 4] + >>> y_pred = [0.5, 0.5, 2., 2.] + >>> mean_poisson_deviance(y_true, y_pred) + 1.4260... + """ + return mean_tweedie_deviance(y_true, y_pred, sample_weight=sample_weight, power=1) + + +@validate_params( + { + "y_true": ["array-like"], + "y_pred": ["array-like"], + "sample_weight": ["array-like", None], + }, + prefer_skip_nested_validation=True, +) +def mean_gamma_deviance(y_true, y_pred, *, sample_weight=None): + """Mean Gamma deviance regression loss. + + Gamma deviance is equivalent to the Tweedie deviance with + the power parameter `power=2`. It is invariant to scaling of + the target variable, and measures relative errors. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + y_true : array-like of shape (n_samples,) + Ground truth (correct) target values. Requires y_true > 0. + + y_pred : array-like of shape (n_samples,) + Estimated target values. Requires y_pred > 0. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. + + Returns + ------- + loss : float + A non-negative floating point value (the best value is 0.0). + + Examples + -------- + >>> from sklearn.metrics import mean_gamma_deviance + >>> y_true = [2, 0.5, 1, 4] + >>> y_pred = [0.5, 0.5, 2., 2.] + >>> mean_gamma_deviance(y_true, y_pred) + 1.0568... 
+ """ + return mean_tweedie_deviance(y_true, y_pred, sample_weight=sample_weight, power=2) + + +@validate_params( + { + "y_true": ["array-like"], + "y_pred": ["array-like"], + "sample_weight": ["array-like", None], + "power": [ + Interval(Real, None, 0, closed="right"), + Interval(Real, 1, None, closed="left"), + ], + }, + prefer_skip_nested_validation=True, +) +def d2_tweedie_score(y_true, y_pred, *, sample_weight=None, power=0): + """ + :math:`D^2` regression score function, fraction of Tweedie deviance explained. + + Best possible score is 1.0 and it can be negative (because the model can be + arbitrarily worse). A model that always uses the empirical mean of `y_true` as + constant prediction, disregarding the input features, gets a D^2 score of 0.0. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 1.0 + + Parameters + ---------- + y_true : array-like of shape (n_samples,) + Ground truth (correct) target values. + + y_pred : array-like of shape (n_samples,) + Estimated target values. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. + + power : float, default=0 + Tweedie power parameter. Either power <= 0 or power >= 1. + + The higher `p` the less weight is given to extreme + deviations between true and predicted targets. + + - power < 0: Extreme stable distribution. Requires: y_pred > 0. + - power = 0 : Normal distribution, output corresponds to r2_score. + y_true and y_pred can be any real numbers. + - power = 1 : Poisson distribution. Requires: y_true >= 0 and + y_pred > 0. + - 1 < p < 2 : Compound Poisson distribution. Requires: y_true >= 0 + and y_pred > 0. + - power = 2 : Gamma distribution. Requires: y_true > 0 and y_pred > 0. + - power = 3 : Inverse Gaussian distribution. Requires: y_true > 0 + and y_pred > 0. + - otherwise : Positive stable distribution. Requires: y_true > 0 + and y_pred > 0. + + Returns + ------- + z : float or ndarray of floats + The D^2 score. + + Notes + ----- + This is not a symmetric function. + + Like R^2, D^2 score may be negative (it need not actually be the square of + a quantity D). + + This metric is not well-defined for single samples and will return a NaN + value if n_samples is less than two. + + References + ---------- + .. [1] Eq. (3.11) of Hastie, Trevor J., Robert Tibshirani and Martin J. + Wainwright. "Statistical Learning with Sparsity: The Lasso and + Generalizations." (2015). https://hastie.su.domains/StatLearnSparsity/ + + Examples + -------- + >>> from sklearn.metrics import d2_tweedie_score + >>> y_true = [0.5, 1, 2.5, 7] + >>> y_pred = [1, 1, 5, 3.5] + >>> d2_tweedie_score(y_true, y_pred) + 0.285... + >>> d2_tweedie_score(y_true, y_pred, power=1) + 0.487... + >>> d2_tweedie_score(y_true, y_pred, power=2) + 0.630... + >>> d2_tweedie_score(y_true, y_true, power=2) + 1.0 + """ + y_type, y_true, y_pred, _ = _check_reg_targets( + y_true, y_pred, None, dtype=[np.float64, np.float32] + ) + if y_type == "continuous-multioutput": + raise ValueError("Multioutput not supported in d2_tweedie_score") + + if _num_samples(y_pred) < 2: + msg = "D^2 score is not well-defined with less than two samples." 
+ warnings.warn(msg, UndefinedMetricWarning) + return float("nan") + + y_true, y_pred = np.squeeze(y_true), np.squeeze(y_pred) + numerator = mean_tweedie_deviance( + y_true, y_pred, sample_weight=sample_weight, power=power + ) + + y_avg = np.average(y_true, weights=sample_weight) + denominator = _mean_tweedie_deviance( + y_true, y_avg, sample_weight=sample_weight, power=power + ) + + return 1 - numerator / denominator + + +@validate_params( + { + "y_true": ["array-like"], + "y_pred": ["array-like"], + "sample_weight": ["array-like", None], + "alpha": [Interval(Real, 0, 1, closed="both")], + "multioutput": [ + StrOptions({"raw_values", "uniform_average"}), + "array-like", + ], + }, + prefer_skip_nested_validation=True, +) +def d2_pinball_score( + y_true, y_pred, *, sample_weight=None, alpha=0.5, multioutput="uniform_average" +): + """ + :math:`D^2` regression score function, fraction of pinball loss explained. + + Best possible score is 1.0 and it can be negative (because the model can be + arbitrarily worse). A model that always uses the empirical alpha-quantile of + `y_true` as constant prediction, disregarding the input features, + gets a :math:`D^2` score of 0.0. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 1.1 + + Parameters + ---------- + y_true : array-like of shape (n_samples,) or (n_samples, n_outputs) + Ground truth (correct) target values. + + y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs) + Estimated target values. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. + + alpha : float, default=0.5 + Slope of the pinball deviance. It determines the quantile level alpha + for which the pinball deviance and also D2 are optimal. + The default `alpha=0.5` is equivalent to `d2_absolute_error_score`. + + multioutput : {'raw_values', 'uniform_average'} or array-like of shape \ + (n_outputs,), default='uniform_average' + Defines aggregating of multiple output values. + Array-like value defines weights used to average scores. + + 'raw_values' : + Returns a full set of errors in case of multioutput input. + + 'uniform_average' : + Scores of all outputs are averaged with uniform weight. + + Returns + ------- + score : float or ndarray of floats + The :math:`D^2` score with a pinball deviance + or ndarray of scores if `multioutput='raw_values'`. + + Notes + ----- + Like :math:`R^2`, :math:`D^2` score may be negative + (it need not actually be the square of a quantity D). + + This metric is not well-defined for a single point and will return a NaN + value if n_samples is less than two. + + References + ---------- + .. [1] Eq. (7) of `Koenker, Roger; Machado, José A. F. (1999). + "Goodness of Fit and Related Inference Processes for Quantile Regression" + `_ + .. [2] Eq. (3.11) of Hastie, Trevor J., Robert Tibshirani and Martin J. + Wainwright. "Statistical Learning with Sparsity: The Lasso and + Generalizations." (2015). https://hastie.su.domains/StatLearnSparsity/ + + Examples + -------- + >>> from sklearn.metrics import d2_pinball_score + >>> y_true = [1, 2, 3] + >>> y_pred = [1, 3, 3] + >>> d2_pinball_score(y_true, y_pred) + 0.5 + >>> d2_pinball_score(y_true, y_pred, alpha=0.9) + 0.772... + >>> d2_pinball_score(y_true, y_pred, alpha=0.1) + -1.045... 
+ >>> d2_pinball_score(y_true, y_true, alpha=0.1) + 1.0 + """ + y_type, y_true, y_pred, multioutput = _check_reg_targets( + y_true, y_pred, multioutput + ) + check_consistent_length(y_true, y_pred, sample_weight) + + if _num_samples(y_pred) < 2: + msg = "D^2 score is not well-defined with less than two samples." + warnings.warn(msg, UndefinedMetricWarning) + return float("nan") + + numerator = mean_pinball_loss( + y_true, + y_pred, + sample_weight=sample_weight, + alpha=alpha, + multioutput="raw_values", + ) + + if sample_weight is None: + y_quantile = np.tile( + np.percentile(y_true, q=alpha * 100, axis=0), (len(y_true), 1) + ) + else: + sample_weight = _check_sample_weight(sample_weight, y_true) + y_quantile = np.tile( + _weighted_percentile( + y_true, sample_weight=sample_weight, percentile=alpha * 100 + ), + (len(y_true), 1), + ) + + denominator = mean_pinball_loss( + y_true, + y_quantile, + sample_weight=sample_weight, + alpha=alpha, + multioutput="raw_values", + ) + + nonzero_numerator = numerator != 0 + nonzero_denominator = denominator != 0 + valid_score = nonzero_numerator & nonzero_denominator + output_scores = np.ones(y_true.shape[1]) + + output_scores[valid_score] = 1 - (numerator[valid_score] / denominator[valid_score]) + output_scores[nonzero_numerator & ~nonzero_denominator] = 0.0 + + if isinstance(multioutput, str): + if multioutput == "raw_values": + # return scores individually + return output_scores + else: # multioutput == "uniform_average" + # passing None as weights to np.average results in uniform mean + avg_weights = None + else: + avg_weights = multioutput + + return np.average(output_scores, weights=avg_weights) + + +@validate_params( + { + "y_true": ["array-like"], + "y_pred": ["array-like"], + "sample_weight": ["array-like", None], + "multioutput": [ + StrOptions({"raw_values", "uniform_average"}), + "array-like", + ], + }, + prefer_skip_nested_validation=True, +) +def d2_absolute_error_score( + y_true, y_pred, *, sample_weight=None, multioutput="uniform_average" +): + """ + :math:`D^2` regression score function, fraction of absolute error explained. + + Best possible score is 1.0 and it can be negative (because the model can be + arbitrarily worse). A model that always uses the empirical median of `y_true` + as constant prediction, disregarding the input features, + gets a :math:`D^2` score of 0.0. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 1.1 + + Parameters + ---------- + y_true : array-like of shape (n_samples,) or (n_samples, n_outputs) + Ground truth (correct) target values. + + y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs) + Estimated target values. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. + + multioutput : {'raw_values', 'uniform_average'} or array-like of shape \ + (n_outputs,), default='uniform_average' + Defines aggregating of multiple output values. + Array-like value defines weights used to average scores. + + 'raw_values' : + Returns a full set of errors in case of multioutput input. + + 'uniform_average' : + Scores of all outputs are averaged with uniform weight. + + Returns + ------- + score : float or ndarray of floats + The :math:`D^2` score with an absolute error deviance + or ndarray of scores if 'multioutput' is 'raw_values'. + + Notes + ----- + Like :math:`R^2`, :math:`D^2` score may be negative + (it need not actually be the square of a quantity D). 
+ + This metric is not well-defined for single samples and will return a NaN + value if n_samples is less than two. + + References + ---------- + .. [1] Eq. (3.11) of Hastie, Trevor J., Robert Tibshirani and Martin J. + Wainwright. "Statistical Learning with Sparsity: The Lasso and + Generalizations." (2015). https://hastie.su.domains/StatLearnSparsity/ + + Examples + -------- + >>> from sklearn.metrics import d2_absolute_error_score + >>> y_true = [3, -0.5, 2, 7] + >>> y_pred = [2.5, 0.0, 2, 8] + >>> d2_absolute_error_score(y_true, y_pred) + 0.764... + >>> y_true = [[0.5, 1], [-1, 1], [7, -6]] + >>> y_pred = [[0, 2], [-1, 2], [8, -5]] + >>> d2_absolute_error_score(y_true, y_pred, multioutput='uniform_average') + 0.691... + >>> d2_absolute_error_score(y_true, y_pred, multioutput='raw_values') + array([0.8125 , 0.57142857]) + >>> y_true = [1, 2, 3] + >>> y_pred = [1, 2, 3] + >>> d2_absolute_error_score(y_true, y_pred) + 1.0 + >>> y_true = [1, 2, 3] + >>> y_pred = [2, 2, 2] + >>> d2_absolute_error_score(y_true, y_pred) + 0.0 + >>> y_true = [1, 2, 3] + >>> y_pred = [3, 2, 1] + >>> d2_absolute_error_score(y_true, y_pred) + -1.0 + """ + return d2_pinball_score( + y_true, y_pred, sample_weight=sample_weight, alpha=0.5, multioutput=multioutput + ) diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/_scorer.py b/venv/lib/python3.10/site-packages/sklearn/metrics/_scorer.py new file mode 100644 index 0000000000000000000000000000000000000000..5f50141a82112d7aec82177859ef9e9f8284fc60 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/metrics/_scorer.py @@ -0,0 +1,954 @@ +""" +The :mod:`sklearn.metrics.scorer` submodule implements a flexible +interface for model selection and evaluation using +arbitrary score functions. + +A scorer object is a callable that can be passed to +:class:`~sklearn.model_selection.GridSearchCV` or +:func:`sklearn.model_selection.cross_val_score` as the ``scoring`` +parameter, to specify how a model should be evaluated. + +The signature of the call is ``(estimator, X, y)`` where ``estimator`` +is the model to be evaluated, ``X`` is the test data and ``y`` is the +ground truth labeling (or ``None`` in the case of unsupervised models). +""" + +# Authors: Andreas Mueller +# Lars Buitinck +# Arnaud Joly +# License: Simplified BSD + +import copy +import warnings +from collections import Counter +from functools import partial +from inspect import signature +from traceback import format_exc + +from ..base import is_regressor +from ..utils import Bunch +from ..utils._param_validation import HasMethods, Hidden, StrOptions, validate_params +from ..utils._response import _get_response_values +from ..utils.metadata_routing import ( + MetadataRequest, + MetadataRouter, + _MetadataRequester, + _raise_for_params, + _routing_enabled, + get_routing_for_object, + process_routing, +) +from ..utils.validation import _check_response_method +from . 
import ( + accuracy_score, + average_precision_score, + balanced_accuracy_score, + brier_score_loss, + class_likelihood_ratios, + explained_variance_score, + f1_score, + jaccard_score, + log_loss, + matthews_corrcoef, + max_error, + mean_absolute_error, + mean_absolute_percentage_error, + mean_gamma_deviance, + mean_poisson_deviance, + mean_squared_error, + mean_squared_log_error, + median_absolute_error, + precision_score, + r2_score, + recall_score, + roc_auc_score, + root_mean_squared_error, + root_mean_squared_log_error, + top_k_accuracy_score, +) +from .cluster import ( + adjusted_mutual_info_score, + adjusted_rand_score, + completeness_score, + fowlkes_mallows_score, + homogeneity_score, + mutual_info_score, + normalized_mutual_info_score, + rand_score, + v_measure_score, +) + + +def _cached_call(cache, estimator, response_method, *args, **kwargs): + """Call estimator with method and args and kwargs.""" + if cache is not None and response_method in cache: + return cache[response_method] + + result, _ = _get_response_values( + estimator, *args, response_method=response_method, **kwargs + ) + + if cache is not None: + cache[response_method] = result + + return result + + +class _MultimetricScorer: + """Callable for multimetric scoring used to avoid repeated calls + to `predict_proba`, `predict`, and `decision_function`. + + `_MultimetricScorer` will return a dictionary of scores corresponding to + the scorers in the dictionary. Note that `_MultimetricScorer` can be + created with a dictionary with one key (i.e. only one actual scorer). + + Parameters + ---------- + scorers : dict + Dictionary mapping names to callable scorers. + + raise_exc : bool, default=True + Whether to raise the exception in `__call__` or not. If set to `False` + a formatted string of the exception details is passed as result of + the failing scorer. + """ + + def __init__(self, *, scorers, raise_exc=True): + self._scorers = scorers + self._raise_exc = raise_exc + + def __call__(self, estimator, *args, **kwargs): + """Evaluate predicted target values.""" + scores = {} + cache = {} if self._use_cache(estimator) else None + cached_call = partial(_cached_call, cache) + + if _routing_enabled(): + routed_params = process_routing(self, "score", **kwargs) + else: + # they all get the same args, and they all get them all + routed_params = Bunch( + **{name: Bunch(score=kwargs) for name in self._scorers} + ) + + for name, scorer in self._scorers.items(): + try: + if isinstance(scorer, _BaseScorer): + score = scorer._score( + cached_call, estimator, *args, **routed_params.get(name).score + ) + else: + score = scorer(estimator, *args, **routed_params.get(name).score) + scores[name] = score + except Exception as e: + if self._raise_exc: + raise e + else: + scores[name] = format_exc() + return scores + + def _use_cache(self, estimator): + """Return True if using a cache is beneficial, thus when a response method will + be called several time. + """ + if len(self._scorers) == 1: # Only one scorer + return False + + counter = Counter( + [ + _check_response_method(estimator, scorer._response_method).__name__ + for scorer in self._scorers.values() + if isinstance(scorer, _BaseScorer) + ] + ) + if any(val > 1 for val in counter.values()): + # The exact same response method or iterable of response methods + # will be called more than once. + return True + + return False + + def get_metadata_routing(self): + """Get metadata routing of this object. + + Please check :ref:`User Guide ` on how the routing + mechanism works. + + .. 
versionadded:: 1.3 + + Returns + ------- + routing : MetadataRouter + A :class:`~utils.metadata_routing.MetadataRouter` encapsulating + routing information. + """ + return MetadataRouter(owner=self.__class__.__name__).add( + **self._scorers, method_mapping="score" + ) + + +class _BaseScorer(_MetadataRequester): + def __init__(self, score_func, sign, kwargs, response_method="predict"): + self._score_func = score_func + self._sign = sign + self._kwargs = kwargs + self._response_method = response_method + + def _get_pos_label(self): + if "pos_label" in self._kwargs: + return self._kwargs["pos_label"] + score_func_params = signature(self._score_func).parameters + if "pos_label" in score_func_params: + return score_func_params["pos_label"].default + return None + + def __repr__(self): + sign_string = "" if self._sign > 0 else ", greater_is_better=False" + response_method_string = f", response_method={self._response_method!r}" + kwargs_string = "".join([f", {k}={v}" for k, v in self._kwargs.items()]) + + return ( + f"make_scorer({self._score_func.__name__}{sign_string}" + f"{response_method_string}{kwargs_string})" + ) + + def __call__(self, estimator, X, y_true, sample_weight=None, **kwargs): + """Evaluate predicted target values for X relative to y_true. + + Parameters + ---------- + estimator : object + Trained estimator to use for scoring. Must have a predict_proba + method; the output of that is used to compute the score. + + X : {array-like, sparse matrix} + Test data that will be fed to estimator.predict. + + y_true : array-like + Gold standard target values for X. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. + + **kwargs : dict + Other parameters passed to the scorer. Refer to + :func:`set_score_request` for more details. + + Only available if `enable_metadata_routing=True`. See the + :ref:`User Guide `. + + .. versionadded:: 1.3 + + Returns + ------- + score : float + Score function applied to prediction of estimator on X. + """ + _raise_for_params(kwargs, self, None) + + _kwargs = copy.deepcopy(kwargs) + if sample_weight is not None: + _kwargs["sample_weight"] = sample_weight + + return self._score(partial(_cached_call, None), estimator, X, y_true, **_kwargs) + + def _warn_overlap(self, message, kwargs): + """Warn if there is any overlap between ``self._kwargs`` and ``kwargs``. + + This method is intended to be used to check for overlap between + ``self._kwargs`` and ``kwargs`` passed as metadata. + """ + _kwargs = set() if self._kwargs is None else set(self._kwargs.keys()) + overlap = _kwargs.intersection(kwargs.keys()) + if overlap: + warnings.warn( + f"{message} Overlapping parameters are: {overlap}", UserWarning + ) + + def set_score_request(self, **kwargs): + """Set requested parameters by the scorer. + + Please see :ref:`User Guide ` on how the routing + mechanism works. + + .. versionadded:: 1.3 + + Parameters + ---------- + kwargs : dict + Arguments should be of the form ``param_name=alias``, and `alias` + can be one of ``{True, False, None, str}``. + """ + if not _routing_enabled(): + raise RuntimeError( + "This method is only available when metadata routing is enabled." + " You can enable it using" + " sklearn.set_config(enable_metadata_routing=True)." + ) + + self._warn_overlap( + message=( + "You are setting metadata request for parameters which are " + "already set as kwargs for this metric. These set values will be " + "overridden by passed metadata if provided. Please pass them either " + "as metadata or kwargs to `make_scorer`." 
+ ), + kwargs=kwargs, + ) + self._metadata_request = MetadataRequest(owner=self.__class__.__name__) + for param, alias in kwargs.items(): + self._metadata_request.score.add_request(param=param, alias=alias) + return self + + +class _Scorer(_BaseScorer): + def _score(self, method_caller, estimator, X, y_true, **kwargs): + """Evaluate the response method of `estimator` on `X` and `y_true`. + + Parameters + ---------- + method_caller : callable + Returns predictions given an estimator, method name, and other + arguments, potentially caching results. + + estimator : object + Trained estimator to use for scoring. + + X : {array-like, sparse matrix} + Test data that will be fed to clf.decision_function or + clf.predict_proba. + + y_true : array-like + Gold standard target values for X. These must be class labels, + not decision function values. + + **kwargs : dict + Other parameters passed to the scorer. Refer to + :func:`set_score_request` for more details. + + Returns + ------- + score : float + Score function applied to prediction of estimator on X. + """ + self._warn_overlap( + message=( + "There is an overlap between set kwargs of this scorer instance and" + " passed metadata. Please pass them either as kwargs to `make_scorer`" + " or metadata, but not both." + ), + kwargs=kwargs, + ) + + pos_label = None if is_regressor(estimator) else self._get_pos_label() + response_method = _check_response_method(estimator, self._response_method) + y_pred = method_caller( + estimator, response_method.__name__, X, pos_label=pos_label + ) + + scoring_kwargs = {**self._kwargs, **kwargs} + return self._sign * self._score_func(y_true, y_pred, **scoring_kwargs) + + +@validate_params( + { + "scoring": [str, callable, None], + }, + prefer_skip_nested_validation=True, +) +def get_scorer(scoring): + """Get a scorer from string. + + Read more in the :ref:`User Guide `. + :func:`~sklearn.metrics.get_scorer_names` can be used to retrieve the names + of all available scorers. + + Parameters + ---------- + scoring : str, callable or None + Scoring method as string. If callable it is returned as is. + If None, returns None. + + Returns + ------- + scorer : callable + The scorer. + + Notes + ----- + When passed a string, this function always returns a copy of the scorer + object. Calling `get_scorer` twice for the same scorer results in two + separate scorer objects. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.dummy import DummyClassifier + >>> from sklearn.metrics import get_scorer + >>> X = np.reshape([0, 1, -1, -0.5, 2], (-1, 1)) + >>> y = np.array([0, 1, 1, 0, 1]) + >>> classifier = DummyClassifier(strategy="constant", constant=0).fit(X, y) + >>> accuracy = get_scorer("accuracy") + >>> accuracy(classifier, X, y) + 0.4 + """ + if isinstance(scoring, str): + try: + scorer = copy.deepcopy(_SCORERS[scoring]) + except KeyError: + raise ValueError( + "%r is not a valid scoring value. " + "Use sklearn.metrics.get_scorer_names() " + "to get valid options." % scoring + ) + else: + scorer = scoring + return scorer + + +class _PassthroughScorer: + def __init__(self, estimator): + self._estimator = estimator + + def __call__(self, estimator, *args, **kwargs): + """Method that wraps estimator.score""" + return estimator.score(*args, **kwargs) + + def get_metadata_routing(self): + """Get requested data properties. + + Please check :ref:`User Guide ` on how the routing + mechanism works. + + .. 
versionadded:: 1.3 + + Returns + ------- + routing : MetadataRouter + A :class:`~utils.metadata_routing.MetadataRouter` encapsulating + routing information. + """ + # This scorer doesn't do any validation or routing, it only exposes the + # requests of the given estimator. This object behaves as a consumer + # rather than a router. Ideally it only exposes the score requests to + # the parent object; however, that requires computing the routing for + # meta-estimators, which would be more time consuming than simply + # returning the child object's requests. + return get_routing_for_object(self._estimator) + + +def _check_multimetric_scoring(estimator, scoring): + """Check the scoring parameter in cases when multiple metrics are allowed. + + In addition, multimetric scoring leverages a caching mechanism to not call the same + estimator response method multiple times. Hence, the scorer is modified to only use + a single response method given a list of response methods and the estimator. + + Parameters + ---------- + estimator : sklearn estimator instance + The estimator for which the scoring will be applied. + + scoring : list, tuple or dict + Strategy to evaluate the performance of the cross-validated model on + the test set. + + The possibilities are: + + - a list or tuple of unique strings; + - a callable returning a dictionary where they keys are the metric + names and the values are the metric scores; + - a dictionary with metric names as keys and callables a values. + + See :ref:`multimetric_grid_search` for an example. + + Returns + ------- + scorers_dict : dict + A dict mapping each scorer name to its validated scorer. + """ + err_msg_generic = ( + f"scoring is invalid (got {scoring!r}). Refer to the " + "scoring glossary for details: " + "https://scikit-learn.org/stable/glossary.html#term-scoring" + ) + + if isinstance(scoring, (list, tuple, set)): + err_msg = ( + "The list/tuple elements must be unique strings of predefined scorers. " + ) + try: + keys = set(scoring) + except TypeError as e: + raise ValueError(err_msg) from e + + if len(keys) != len(scoring): + raise ValueError( + f"{err_msg} Duplicate elements were found in" + f" the given list. {scoring!r}" + ) + elif len(keys) > 0: + if not all(isinstance(k, str) for k in keys): + if any(callable(k) for k in keys): + raise ValueError( + f"{err_msg} One or more of the elements " + "were callables. Use a dict of score " + "name mapped to the scorer callable. " + f"Got {scoring!r}" + ) + else: + raise ValueError( + f"{err_msg} Non-string types were found " + f"in the given list. Got {scoring!r}" + ) + scorers = { + scorer: check_scoring(estimator, scoring=scorer) for scorer in scoring + } + else: + raise ValueError(f"{err_msg} Empty list was given. {scoring!r}") + + elif isinstance(scoring, dict): + keys = set(scoring) + if not all(isinstance(k, str) for k in keys): + raise ValueError( + "Non-string types were found in the keys of " + f"the given dict. scoring={scoring!r}" + ) + if len(keys) == 0: + raise ValueError(f"An empty dict was passed. {scoring!r}") + scorers = { + key: check_scoring(estimator, scoring=scorer) + for key, scorer in scoring.items() + } + else: + raise ValueError(err_msg_generic) + + return scorers + + +def _get_response_method(response_method, needs_threshold, needs_proba): + """Handles deprecation of `needs_threshold` and `needs_proba` parameters in + favor of `response_method`. 
+ """ + needs_threshold_provided = needs_threshold != "deprecated" + needs_proba_provided = needs_proba != "deprecated" + response_method_provided = response_method is not None + + needs_threshold = False if needs_threshold == "deprecated" else needs_threshold + needs_proba = False if needs_proba == "deprecated" else needs_proba + + if response_method_provided and (needs_proba_provided or needs_threshold_provided): + raise ValueError( + "You cannot set both `response_method` and `needs_proba` or " + "`needs_threshold` at the same time. Only use `response_method` since " + "the other two are deprecated in version 1.4 and will be removed in 1.6." + ) + + if needs_proba_provided or needs_threshold_provided: + warnings.warn( + ( + "The `needs_threshold` and `needs_proba` parameter are deprecated in " + "version 1.4 and will be removed in 1.6. You can either let " + "`response_method` be `None` or set it to `predict` to preserve the " + "same behaviour." + ), + FutureWarning, + ) + + if response_method_provided: + return response_method + + if needs_proba is True and needs_threshold is True: + raise ValueError( + "You cannot set both `needs_proba` and `needs_threshold` at the same " + "time. Use `response_method` instead since the other two are deprecated " + "in version 1.4 and will be removed in 1.6." + ) + + if needs_proba is True: + response_method = "predict_proba" + elif needs_threshold is True: + response_method = ("decision_function", "predict_proba") + else: + response_method = "predict" + + return response_method + + +@validate_params( + { + "score_func": [callable], + "response_method": [ + None, + list, + tuple, + StrOptions({"predict", "predict_proba", "decision_function"}), + ], + "greater_is_better": ["boolean"], + "needs_proba": ["boolean", Hidden(StrOptions({"deprecated"}))], + "needs_threshold": ["boolean", Hidden(StrOptions({"deprecated"}))], + }, + prefer_skip_nested_validation=True, +) +def make_scorer( + score_func, + *, + response_method=None, + greater_is_better=True, + needs_proba="deprecated", + needs_threshold="deprecated", + **kwargs, +): + """Make a scorer from a performance metric or loss function. + + A scorer is a wrapper around an arbitrary metric or loss function that is called + with the signature `scorer(estimator, X, y_true, **kwargs)`. + + It is accepted in all scikit-learn estimators or functions allowing a `scoring` + parameter. + + The parameter `response_method` allows to specify which method of the estimator + should be used to feed the scoring/loss function. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + score_func : callable + Score function (or loss function) with signature + ``score_func(y, y_pred, **kwargs)``. + + response_method : {"predict_proba", "decision_function", "predict"} or \ + list/tuple of such str, default=None + + Specifies the response method to use get prediction from an estimator + (i.e. :term:`predict_proba`, :term:`decision_function` or + :term:`predict`). Possible choices are: + + - if `str`, it corresponds to the name to the method to return; + - if a list or tuple of `str`, it provides the method names in order of + preference. The method returned corresponds to the first method in + the list and which is implemented by `estimator`. + - if `None`, it is equivalent to `"predict"`. + + .. versionadded:: 1.4 + + greater_is_better : bool, default=True + Whether `score_func` is a score function (default), meaning high is + good, or a loss function, meaning low is good. 
In the latter case, the + scorer object will sign-flip the outcome of the `score_func`. + + needs_proba : bool, default=False + Whether `score_func` requires `predict_proba` to get probability + estimates out of a classifier. + + If True, for binary `y_true`, the score function is supposed to accept + a 1D `y_pred` (i.e., probability of the positive class, shape + `(n_samples,)`). + + .. deprecated:: 1.4 + `needs_proba` is deprecated in version 1.4 and will be removed in + 1.6. Use `response_method="predict_proba"` instead. + + needs_threshold : bool, default=False + Whether `score_func` takes a continuous decision certainty. + This only works for binary classification using estimators that + have either a `decision_function` or `predict_proba` method. + + If True, for binary `y_true`, the score function is supposed to accept + a 1D `y_pred` (i.e., probability of the positive class or the decision + function, shape `(n_samples,)`). + + For example `average_precision` or the area under the roc curve + can not be computed using discrete predictions alone. + + .. deprecated:: 1.4 + `needs_threshold` is deprecated in version 1.4 and will be removed + in 1.6. Use `response_method=("decision_function", "predict_proba")` + instead to preserve the same behaviour. + + **kwargs : additional arguments + Additional parameters to be passed to `score_func`. + + Returns + ------- + scorer : callable + Callable object that returns a scalar score; greater is better. + + Examples + -------- + >>> from sklearn.metrics import fbeta_score, make_scorer + >>> ftwo_scorer = make_scorer(fbeta_score, beta=2) + >>> ftwo_scorer + make_scorer(fbeta_score, response_method='predict', beta=2) + >>> from sklearn.model_selection import GridSearchCV + >>> from sklearn.svm import LinearSVC + >>> grid = GridSearchCV(LinearSVC(), param_grid={'C': [1, 10]}, + ... 
scoring=ftwo_scorer) + """ + response_method = _get_response_method( + response_method, needs_threshold, needs_proba + ) + sign = 1 if greater_is_better else -1 + return _Scorer(score_func, sign, kwargs, response_method) + + +# Standard regression scores +explained_variance_scorer = make_scorer(explained_variance_score) +r2_scorer = make_scorer(r2_score) +max_error_scorer = make_scorer(max_error, greater_is_better=False) +neg_mean_squared_error_scorer = make_scorer(mean_squared_error, greater_is_better=False) +neg_mean_squared_log_error_scorer = make_scorer( + mean_squared_log_error, greater_is_better=False +) +neg_mean_absolute_error_scorer = make_scorer( + mean_absolute_error, greater_is_better=False +) +neg_mean_absolute_percentage_error_scorer = make_scorer( + mean_absolute_percentage_error, greater_is_better=False +) +neg_median_absolute_error_scorer = make_scorer( + median_absolute_error, greater_is_better=False +) +neg_root_mean_squared_error_scorer = make_scorer( + root_mean_squared_error, greater_is_better=False +) +neg_root_mean_squared_log_error_scorer = make_scorer( + root_mean_squared_log_error, greater_is_better=False +) +neg_mean_poisson_deviance_scorer = make_scorer( + mean_poisson_deviance, greater_is_better=False +) + +neg_mean_gamma_deviance_scorer = make_scorer( + mean_gamma_deviance, greater_is_better=False +) + +# Standard Classification Scores +accuracy_scorer = make_scorer(accuracy_score) +balanced_accuracy_scorer = make_scorer(balanced_accuracy_score) +matthews_corrcoef_scorer = make_scorer(matthews_corrcoef) + + +def positive_likelihood_ratio(y_true, y_pred): + return class_likelihood_ratios(y_true, y_pred)[0] + + +def negative_likelihood_ratio(y_true, y_pred): + return class_likelihood_ratios(y_true, y_pred)[1] + + +positive_likelihood_ratio_scorer = make_scorer(positive_likelihood_ratio) +neg_negative_likelihood_ratio_scorer = make_scorer( + negative_likelihood_ratio, greater_is_better=False +) + +# Score functions that need decision values +top_k_accuracy_scorer = make_scorer( + top_k_accuracy_score, + greater_is_better=True, + response_method=("decision_function", "predict_proba"), +) +roc_auc_scorer = make_scorer( + roc_auc_score, + greater_is_better=True, + response_method=("decision_function", "predict_proba"), +) +average_precision_scorer = make_scorer( + average_precision_score, + response_method=("decision_function", "predict_proba"), +) +roc_auc_ovo_scorer = make_scorer( + roc_auc_score, response_method="predict_proba", multi_class="ovo" +) +roc_auc_ovo_weighted_scorer = make_scorer( + roc_auc_score, + response_method="predict_proba", + multi_class="ovo", + average="weighted", +) +roc_auc_ovr_scorer = make_scorer( + roc_auc_score, response_method="predict_proba", multi_class="ovr" +) +roc_auc_ovr_weighted_scorer = make_scorer( + roc_auc_score, + response_method="predict_proba", + multi_class="ovr", + average="weighted", +) + +# Score function for probabilistic classification +neg_log_loss_scorer = make_scorer( + log_loss, greater_is_better=False, response_method="predict_proba" +) +neg_brier_score_scorer = make_scorer( + brier_score_loss, greater_is_better=False, response_method="predict_proba" +) +brier_score_loss_scorer = make_scorer( + brier_score_loss, greater_is_better=False, response_method="predict_proba" +) + + +# Clustering scores +adjusted_rand_scorer = make_scorer(adjusted_rand_score) +rand_scorer = make_scorer(rand_score) +homogeneity_scorer = make_scorer(homogeneity_score) +completeness_scorer = make_scorer(completeness_score) 
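# --- Illustrative aside (editorial, not part of the diffed sklearn file) -----
# A minimal sketch of how a scorer produced by `make_scorer` is consumed by
# the model-selection machinery defined around it: wrapping a loss with
# `greater_is_better=False` makes the scorer sign-flip the metric, so "greater
# is better" holds uniformly for cross-validation. The dataset, estimator, and
# metric choices below are assumptions made only for this example.
import numpy as np
from sklearn.datasets import make_regression
from sklearn.linear_model import Ridge
from sklearn.metrics import make_scorer, mean_absolute_error
from sklearn.model_selection import cross_val_score

X_demo, y_demo = make_regression(n_samples=200, n_features=5, random_state=0)

# Loss function -> scorer: cross_val_score reports negated MAE values,
# so higher (closer to zero) means a better fold.
neg_mae = make_scorer(mean_absolute_error, greater_is_better=False)
fold_scores = cross_val_score(Ridge(), X_demo, y_demo, cv=5, scoring=neg_mae)
print(fold_scores)  # five non-positive numbers; the best possible value is 0.0
# -----------------------------------------------------------------------------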
+v_measure_scorer = make_scorer(v_measure_score) +mutual_info_scorer = make_scorer(mutual_info_score) +adjusted_mutual_info_scorer = make_scorer(adjusted_mutual_info_score) +normalized_mutual_info_scorer = make_scorer(normalized_mutual_info_score) +fowlkes_mallows_scorer = make_scorer(fowlkes_mallows_score) + + +_SCORERS = dict( + explained_variance=explained_variance_scorer, + r2=r2_scorer, + max_error=max_error_scorer, + matthews_corrcoef=matthews_corrcoef_scorer, + neg_median_absolute_error=neg_median_absolute_error_scorer, + neg_mean_absolute_error=neg_mean_absolute_error_scorer, + neg_mean_absolute_percentage_error=neg_mean_absolute_percentage_error_scorer, + neg_mean_squared_error=neg_mean_squared_error_scorer, + neg_mean_squared_log_error=neg_mean_squared_log_error_scorer, + neg_root_mean_squared_error=neg_root_mean_squared_error_scorer, + neg_root_mean_squared_log_error=neg_root_mean_squared_log_error_scorer, + neg_mean_poisson_deviance=neg_mean_poisson_deviance_scorer, + neg_mean_gamma_deviance=neg_mean_gamma_deviance_scorer, + accuracy=accuracy_scorer, + top_k_accuracy=top_k_accuracy_scorer, + roc_auc=roc_auc_scorer, + roc_auc_ovr=roc_auc_ovr_scorer, + roc_auc_ovo=roc_auc_ovo_scorer, + roc_auc_ovr_weighted=roc_auc_ovr_weighted_scorer, + roc_auc_ovo_weighted=roc_auc_ovo_weighted_scorer, + balanced_accuracy=balanced_accuracy_scorer, + average_precision=average_precision_scorer, + neg_log_loss=neg_log_loss_scorer, + neg_brier_score=neg_brier_score_scorer, + positive_likelihood_ratio=positive_likelihood_ratio_scorer, + neg_negative_likelihood_ratio=neg_negative_likelihood_ratio_scorer, + # Cluster metrics that use supervised evaluation + adjusted_rand_score=adjusted_rand_scorer, + rand_score=rand_scorer, + homogeneity_score=homogeneity_scorer, + completeness_score=completeness_scorer, + v_measure_score=v_measure_scorer, + mutual_info_score=mutual_info_scorer, + adjusted_mutual_info_score=adjusted_mutual_info_scorer, + normalized_mutual_info_score=normalized_mutual_info_scorer, + fowlkes_mallows_score=fowlkes_mallows_scorer, +) + + +def get_scorer_names(): + """Get the names of all available scorers. + + These names can be passed to :func:`~sklearn.metrics.get_scorer` to + retrieve the scorer object. + + Returns + ------- + list of str + Names of all available scorers. + + Examples + -------- + >>> from sklearn.metrics import get_scorer_names + >>> all_scorers = get_scorer_names() + >>> type(all_scorers) + + >>> all_scorers[:3] + ['accuracy', 'adjusted_mutual_info_score', 'adjusted_rand_score'] + >>> "roc_auc" in all_scorers + True + """ + return sorted(_SCORERS.keys()) + + +for name, metric in [ + ("precision", precision_score), + ("recall", recall_score), + ("f1", f1_score), + ("jaccard", jaccard_score), +]: + _SCORERS[name] = make_scorer(metric, average="binary") + for average in ["macro", "micro", "samples", "weighted"]: + qualified_name = "{0}_{1}".format(name, average) + _SCORERS[qualified_name] = make_scorer(metric, pos_label=None, average=average) + + +@validate_params( + { + "estimator": [HasMethods("fit")], + "scoring": [StrOptions(set(get_scorer_names())), callable, None], + "allow_none": ["boolean"], + }, + prefer_skip_nested_validation=True, +) +def check_scoring(estimator, scoring=None, *, allow_none=False): + """Determine scorer from user options. + + A TypeError will be thrown if the estimator cannot be scored. + + Parameters + ---------- + estimator : estimator object implementing 'fit' + The object to use to fit the data. 
+ + scoring : str or callable, default=None + A string (see model evaluation documentation) or + a scorer callable object / function with signature + ``scorer(estimator, X, y)``. + If None, the provided estimator object's `score` method is used. + + allow_none : bool, default=False + If no scoring is specified and the estimator has no score function, we + can either return None or raise an exception. + + Returns + ------- + scoring : callable + A scorer callable object / function with signature + ``scorer(estimator, X, y)``. + + Examples + -------- + >>> from sklearn.datasets import load_iris + >>> from sklearn.metrics import check_scoring + >>> from sklearn.tree import DecisionTreeClassifier + >>> X, y = load_iris(return_X_y=True) + >>> classifier = DecisionTreeClassifier(max_depth=2).fit(X, y) + >>> scorer = check_scoring(classifier, scoring='accuracy') + >>> scorer(classifier, X, y) + 0.96... + """ + if isinstance(scoring, str): + return get_scorer(scoring) + if callable(scoring): + # Heuristic to ensure user has not passed a metric + module = getattr(scoring, "__module__", None) + if ( + hasattr(module, "startswith") + and module.startswith("sklearn.metrics.") + and not module.startswith("sklearn.metrics._scorer") + and not module.startswith("sklearn.metrics.tests.") + ): + raise ValueError( + "scoring value %r looks like it is a metric " + "function rather than a scorer. A scorer should " + "require an estimator as its first parameter. " + "Please use `make_scorer` to convert a metric " + "to a scorer." % scoring + ) + return get_scorer(scoring) + if scoring is None: + if hasattr(estimator, "score"): + return _PassthroughScorer(estimator) + elif allow_none: + return None + else: + raise TypeError( + "If no scoring is specified, the estimator passed should " + "have a 'score' method. The estimator %r does not." % estimator + ) diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/cluster/__init__.py b/venv/lib/python3.10/site-packages/sklearn/metrics/cluster/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a332997a84414e07473554421399fb2a01c0beb2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/metrics/cluster/__init__.py @@ -0,0 +1,52 @@ +""" +The :mod:`sklearn.metrics.cluster` submodule contains evaluation metrics for +cluster analysis results. There are two forms of evaluation: + +- supervised, which uses a ground truth class values for each sample. +- unsupervised, which does not and measures the 'quality' of the model itself. 
+""" +from ._bicluster import consensus_score +from ._supervised import ( + adjusted_mutual_info_score, + adjusted_rand_score, + completeness_score, + contingency_matrix, + entropy, + expected_mutual_information, + fowlkes_mallows_score, + homogeneity_completeness_v_measure, + homogeneity_score, + mutual_info_score, + normalized_mutual_info_score, + pair_confusion_matrix, + rand_score, + v_measure_score, +) +from ._unsupervised import ( + calinski_harabasz_score, + davies_bouldin_score, + silhouette_samples, + silhouette_score, +) + +__all__ = [ + "adjusted_mutual_info_score", + "normalized_mutual_info_score", + "adjusted_rand_score", + "rand_score", + "completeness_score", + "pair_confusion_matrix", + "contingency_matrix", + "expected_mutual_information", + "homogeneity_completeness_v_measure", + "homogeneity_score", + "mutual_info_score", + "v_measure_score", + "fowlkes_mallows_score", + "entropy", + "silhouette_samples", + "silhouette_score", + "calinski_harabasz_score", + "davies_bouldin_score", + "consensus_score", +] diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/cluster/_bicluster.py b/venv/lib/python3.10/site-packages/sklearn/metrics/cluster/_bicluster.py new file mode 100644 index 0000000000000000000000000000000000000000..713d0bee8fa2eeae90cf3c16117099dd17f7318c --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/metrics/cluster/_bicluster.py @@ -0,0 +1,107 @@ +import numpy as np +from scipy.optimize import linear_sum_assignment + +from ...utils._param_validation import StrOptions, validate_params +from ...utils.validation import check_array, check_consistent_length + +__all__ = ["consensus_score"] + + +def _check_rows_and_columns(a, b): + """Unpacks the row and column arrays and checks their shape.""" + check_consistent_length(*a) + check_consistent_length(*b) + checks = lambda x: check_array(x, ensure_2d=False) + a_rows, a_cols = map(checks, a) + b_rows, b_cols = map(checks, b) + return a_rows, a_cols, b_rows, b_cols + + +def _jaccard(a_rows, a_cols, b_rows, b_cols): + """Jaccard coefficient on the elements of the two biclusters.""" + intersection = (a_rows * b_rows).sum() * (a_cols * b_cols).sum() + + a_size = a_rows.sum() * a_cols.sum() + b_size = b_rows.sum() * b_cols.sum() + + return intersection / (a_size + b_size - intersection) + + +def _pairwise_similarity(a, b, similarity): + """Computes pairwise similarity matrix. + + result[i, j] is the Jaccard coefficient of a's bicluster i and b's + bicluster j. + + """ + a_rows, a_cols, b_rows, b_cols = _check_rows_and_columns(a, b) + n_a = a_rows.shape[0] + n_b = b_rows.shape[0] + result = np.array( + [ + [similarity(a_rows[i], a_cols[i], b_rows[j], b_cols[j]) for j in range(n_b)] + for i in range(n_a) + ] + ) + return result + + +@validate_params( + { + "a": [tuple], + "b": [tuple], + "similarity": [callable, StrOptions({"jaccard"})], + }, + prefer_skip_nested_validation=True, +) +def consensus_score(a, b, *, similarity="jaccard"): + """The similarity of two sets of biclusters. + + Similarity between individual biclusters is computed. Then the + best matching between sets is found using the Hungarian algorithm. + The final score is the sum of similarities divided by the size of + the larger set. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + a : tuple (rows, columns) + Tuple of row and column indicators for a set of biclusters. + + b : tuple (rows, columns) + Another set of biclusters like ``a``. 
+ + similarity : 'jaccard' or callable, default='jaccard' + May be the string "jaccard" to use the Jaccard coefficient, or + any function that takes four arguments, each of which is a 1d + indicator vector: (a_rows, a_columns, b_rows, b_columns). + + Returns + ------- + consensus_score : float + Consensus score, a non-negative value, sum of similarities + divided by size of larger set. + + References + ---------- + + * Hochreiter, Bodenhofer, et. al., 2010. `FABIA: factor analysis + for bicluster acquisition + `__. + + Examples + -------- + >>> from sklearn.metrics import consensus_score + >>> a = ([[True, False], [False, True]], [[False, True], [True, False]]) + >>> b = ([[False, True], [True, False]], [[True, False], [False, True]]) + >>> consensus_score(a, b, similarity='jaccard') + 1.0 + """ + if similarity == "jaccard": + similarity = _jaccard + matrix = _pairwise_similarity(a, b, similarity) + row_indices, col_indices = linear_sum_assignment(1.0 - matrix) + n_a = len(a[0]) + n_b = len(b[0]) + return matrix[row_indices, col_indices].sum() / max(n_a, n_b) diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/cluster/_expected_mutual_info_fast.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/sklearn/metrics/cluster/_expected_mutual_info_fast.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..fbe37dc0deb6a1ed0e5b7bc7dda5e70f79f1d133 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sklearn/metrics/cluster/_expected_mutual_info_fast.cpython-310-x86_64-linux-gnu.so differ diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/cluster/_unsupervised.py b/venv/lib/python3.10/site-packages/sklearn/metrics/cluster/_unsupervised.py new file mode 100644 index 0000000000000000000000000000000000000000..147e231e7e95e6c75a32aa7871ea984ecab40399 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/metrics/cluster/_unsupervised.py @@ -0,0 +1,464 @@ +"""Unsupervised evaluation metrics.""" + +# Authors: Robert Layton +# Arnaud Fouchet +# Thierry Guillemot +# License: BSD 3 clause + + +import functools +from numbers import Integral + +import numpy as np +from scipy.sparse import issparse + +from ...preprocessing import LabelEncoder +from ...utils import _safe_indexing, check_random_state, check_X_y +from ...utils._param_validation import ( + Interval, + StrOptions, + validate_params, +) +from ..pairwise import _VALID_METRICS, pairwise_distances, pairwise_distances_chunked + + +def check_number_of_labels(n_labels, n_samples): + """Check that number of labels are valid. + + Parameters + ---------- + n_labels : int + Number of labels. + + n_samples : int + Number of samples. + """ + if not 1 < n_labels < n_samples: + raise ValueError( + "Number of labels is %d. Valid values are 2 to n_samples - 1 (inclusive)" + % n_labels + ) + + +@validate_params( + { + "X": ["array-like", "sparse matrix"], + "labels": ["array-like"], + "metric": [StrOptions(set(_VALID_METRICS) | {"precomputed"}), callable], + "sample_size": [Interval(Integral, 1, None, closed="left"), None], + "random_state": ["random_state"], + }, + prefer_skip_nested_validation=True, +) +def silhouette_score( + X, labels, *, metric="euclidean", sample_size=None, random_state=None, **kwds +): + """Compute the mean Silhouette Coefficient of all samples. + + The Silhouette Coefficient is calculated using the mean intra-cluster + distance (``a``) and the mean nearest-cluster distance (``b``) for each + sample. 
The Silhouette Coefficient for a sample is ``(b - a) / max(a, + b)``. To clarify, ``b`` is the distance between a sample and the nearest + cluster that the sample is not a part of. + Note that Silhouette Coefficient is only defined if number of labels + is ``2 <= n_labels <= n_samples - 1``. + + This function returns the mean Silhouette Coefficient over all samples. + To obtain the values for each sample, use :func:`silhouette_samples`. + + The best value is 1 and the worst value is -1. Values near 0 indicate + overlapping clusters. Negative values generally indicate that a sample has + been assigned to the wrong cluster, as a different cluster is more similar. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples_a, n_samples_a) if metric == \ + "precomputed" or (n_samples_a, n_features) otherwise + An array of pairwise distances between samples, or a feature array. + + labels : array-like of shape (n_samples,) + Predicted labels for each sample. + + metric : str or callable, default='euclidean' + The metric to use when calculating distance between instances in a + feature array. If metric is a string, it must be one of the options + allowed by :func:`~sklearn.metrics.pairwise_distances`. If ``X`` is + the distance array itself, use ``metric="precomputed"``. + + sample_size : int, default=None + The size of the sample to use when computing the Silhouette Coefficient + on a random subset of the data. + If ``sample_size is None``, no sampling is used. + + random_state : int, RandomState instance or None, default=None + Determines random number generation for selecting a subset of samples. + Used when ``sample_size is not None``. + Pass an int for reproducible results across multiple function calls. + See :term:`Glossary `. + + **kwds : optional keyword parameters + Any further parameters are passed directly to the distance function. + If using a scipy.spatial.distance metric, the parameters are still + metric dependent. See the scipy docs for usage examples. + + Returns + ------- + silhouette : float + Mean Silhouette Coefficient for all samples. + + References + ---------- + + .. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the + Interpretation and Validation of Cluster Analysis". Computational + and Applied Mathematics 20: 53-65. + `_ + + .. [2] `Wikipedia entry on the Silhouette Coefficient + `_ + + Examples + -------- + >>> from sklearn.datasets import make_blobs + >>> from sklearn.cluster import KMeans + >>> from sklearn.metrics import silhouette_score + >>> X, y = make_blobs(random_state=42) + >>> kmeans = KMeans(n_clusters=2, random_state=42) + >>> silhouette_score(X, kmeans.fit_predict(X)) + 0.49... + """ + if sample_size is not None: + X, labels = check_X_y(X, labels, accept_sparse=["csc", "csr"]) + random_state = check_random_state(random_state) + indices = random_state.permutation(X.shape[0])[:sample_size] + if metric == "precomputed": + X, labels = X[indices].T[indices].T, labels[indices] + else: + X, labels = X[indices], labels[indices] + return np.mean(silhouette_samples(X, labels, metric=metric, **kwds)) + + +def _silhouette_reduce(D_chunk, start, labels, label_freqs): + """Accumulate silhouette statistics for vertical chunk of X. + + Parameters + ---------- + D_chunk : {array-like, sparse matrix} of shape (n_chunk_samples, n_samples) + Precomputed distances for a chunk. If a sparse matrix is provided, + only CSR format is accepted. + start : int + First index in the chunk. 
+ labels : array-like of shape (n_samples,) + Corresponding cluster labels, encoded as {0, ..., n_clusters-1}. + label_freqs : array-like + Distribution of cluster labels in ``labels``. + """ + n_chunk_samples = D_chunk.shape[0] + # accumulate distances from each sample to each cluster + cluster_distances = np.zeros( + (n_chunk_samples, len(label_freqs)), dtype=D_chunk.dtype + ) + + if issparse(D_chunk): + if D_chunk.format != "csr": + raise TypeError( + "Expected CSR matrix. Please pass sparse matrix in CSR format." + ) + for i in range(n_chunk_samples): + indptr = D_chunk.indptr + indices = D_chunk.indices[indptr[i] : indptr[i + 1]] + sample_weights = D_chunk.data[indptr[i] : indptr[i + 1]] + sample_labels = np.take(labels, indices) + cluster_distances[i] += np.bincount( + sample_labels, weights=sample_weights, minlength=len(label_freqs) + ) + else: + for i in range(n_chunk_samples): + sample_weights = D_chunk[i] + sample_labels = labels + cluster_distances[i] += np.bincount( + sample_labels, weights=sample_weights, minlength=len(label_freqs) + ) + + # intra_index selects intra-cluster distances within cluster_distances + end = start + n_chunk_samples + intra_index = (np.arange(n_chunk_samples), labels[start:end]) + # intra_cluster_distances are averaged over cluster size outside this function + intra_cluster_distances = cluster_distances[intra_index] + # of the remaining distances we normalise and extract the minimum + cluster_distances[intra_index] = np.inf + cluster_distances /= label_freqs + inter_cluster_distances = cluster_distances.min(axis=1) + return intra_cluster_distances, inter_cluster_distances + + +@validate_params( + { + "X": ["array-like", "sparse matrix"], + "labels": ["array-like"], + "metric": [StrOptions(set(_VALID_METRICS) | {"precomputed"}), callable], + }, + prefer_skip_nested_validation=True, +) +def silhouette_samples(X, labels, *, metric="euclidean", **kwds): + """Compute the Silhouette Coefficient for each sample. + + The Silhouette Coefficient is a measure of how well samples are clustered + with samples that are similar to themselves. Clustering models with a high + Silhouette Coefficient are said to be dense, where samples in the same + cluster are similar to each other, and well separated, where samples in + different clusters are not very similar to each other. + + The Silhouette Coefficient is calculated using the mean intra-cluster + distance (``a``) and the mean nearest-cluster distance (``b``) for each + sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a, + b)``. + Note that Silhouette Coefficient is only defined if number of labels + is 2 ``<= n_labels <= n_samples - 1``. + + This function returns the Silhouette Coefficient for each sample. + + The best value is 1 and the worst value is -1. Values near 0 indicate + overlapping clusters. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples_a, n_samples_a) if metric == \ + "precomputed" or (n_samples_a, n_features) otherwise + An array of pairwise distances between samples, or a feature array. If + a sparse matrix is provided, CSR format should be favoured avoiding + an additional copy. + + labels : array-like of shape (n_samples,) + Label values for each sample. + + metric : str or callable, default='euclidean' + The metric to use when calculating distance between instances in a + feature array. If metric is a string, it must be one of the options + allowed by :func:`~sklearn.metrics.pairwise_distances`. 
+ If ``X`` is the distance array itself, use "precomputed" as the metric. + Precomputed distance matrices must have 0 along the diagonal. + + **kwds : optional keyword parameters + Any further parameters are passed directly to the distance function. + If using a ``scipy.spatial.distance`` metric, the parameters are still + metric dependent. See the scipy docs for usage examples. + + Returns + ------- + silhouette : array-like of shape (n_samples,) + Silhouette Coefficients for each sample. + + References + ---------- + + .. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the + Interpretation and Validation of Cluster Analysis". Computational + and Applied Mathematics 20: 53-65. + `_ + + .. [2] `Wikipedia entry on the Silhouette Coefficient + `_ + + Examples + -------- + >>> from sklearn.metrics import silhouette_samples + >>> from sklearn.datasets import make_blobs + >>> from sklearn.cluster import KMeans + >>> X, y = make_blobs(n_samples=50, random_state=42) + >>> kmeans = KMeans(n_clusters=3, random_state=42) + >>> labels = kmeans.fit_predict(X) + >>> silhouette_samples(X, labels) + array([...]) + """ + X, labels = check_X_y(X, labels, accept_sparse=["csr"]) + + # Check for non-zero diagonal entries in precomputed distance matrix + if metric == "precomputed": + error_msg = ValueError( + "The precomputed distance matrix contains non-zero " + "elements on the diagonal. Use np.fill_diagonal(X, 0)." + ) + if X.dtype.kind == "f": + atol = np.finfo(X.dtype).eps * 100 + if np.any(np.abs(X.diagonal()) > atol): + raise error_msg + elif np.any(X.diagonal() != 0): # integral dtype + raise error_msg + + le = LabelEncoder() + labels = le.fit_transform(labels) + n_samples = len(labels) + label_freqs = np.bincount(labels) + check_number_of_labels(len(le.classes_), n_samples) + + kwds["metric"] = metric + reduce_func = functools.partial( + _silhouette_reduce, labels=labels, label_freqs=label_freqs + ) + results = zip(*pairwise_distances_chunked(X, reduce_func=reduce_func, **kwds)) + intra_clust_dists, inter_clust_dists = results + intra_clust_dists = np.concatenate(intra_clust_dists) + inter_clust_dists = np.concatenate(inter_clust_dists) + + denom = (label_freqs - 1).take(labels, mode="clip") + with np.errstate(divide="ignore", invalid="ignore"): + intra_clust_dists /= denom + + sil_samples = inter_clust_dists - intra_clust_dists + with np.errstate(divide="ignore", invalid="ignore"): + sil_samples /= np.maximum(intra_clust_dists, inter_clust_dists) + # nan values are for clusters of size 1, and should be 0 + return np.nan_to_num(sil_samples) + + +@validate_params( + { + "X": ["array-like"], + "labels": ["array-like"], + }, + prefer_skip_nested_validation=True, +) +def calinski_harabasz_score(X, labels): + """Compute the Calinski and Harabasz score. + + It is also known as the Variance Ratio Criterion. + + The score is defined as ratio of the sum of between-cluster dispersion and + of within-cluster dispersion. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + A list of ``n_features``-dimensional data points. Each row corresponds + to a single data point. + + labels : array-like of shape (n_samples,) + Predicted labels for each sample. + + Returns + ------- + score : float + The resulting Calinski-Harabasz score. + + References + ---------- + .. [1] `T. Calinski and J. Harabasz, 1974. "A dendrite method for cluster + analysis". 
Communications in Statistics + `_ + + Examples + -------- + >>> from sklearn.datasets import make_blobs + >>> from sklearn.cluster import KMeans + >>> from sklearn.metrics import calinski_harabasz_score + >>> X, _ = make_blobs(random_state=0) + >>> kmeans = KMeans(n_clusters=3, random_state=0,).fit(X) + >>> calinski_harabasz_score(X, kmeans.labels_) + 114.8... + """ + X, labels = check_X_y(X, labels) + le = LabelEncoder() + labels = le.fit_transform(labels) + + n_samples, _ = X.shape + n_labels = len(le.classes_) + + check_number_of_labels(n_labels, n_samples) + + extra_disp, intra_disp = 0.0, 0.0 + mean = np.mean(X, axis=0) + for k in range(n_labels): + cluster_k = X[labels == k] + mean_k = np.mean(cluster_k, axis=0) + extra_disp += len(cluster_k) * np.sum((mean_k - mean) ** 2) + intra_disp += np.sum((cluster_k - mean_k) ** 2) + + return ( + 1.0 + if intra_disp == 0.0 + else extra_disp * (n_samples - n_labels) / (intra_disp * (n_labels - 1.0)) + ) + + +@validate_params( + { + "X": ["array-like"], + "labels": ["array-like"], + }, + prefer_skip_nested_validation=True, +) +def davies_bouldin_score(X, labels): + """Compute the Davies-Bouldin score. + + The score is defined as the average similarity measure of each cluster with + its most similar cluster, where similarity is the ratio of within-cluster + distances to between-cluster distances. Thus, clusters which are farther + apart and less dispersed will result in a better score. + + The minimum score is zero, with lower values indicating better clustering. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.20 + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + A list of ``n_features``-dimensional data points. Each row corresponds + to a single data point. + + labels : array-like of shape (n_samples,) + Predicted labels for each sample. + + Returns + ------- + score: float + The resulting Davies-Bouldin score. + + References + ---------- + .. [1] Davies, David L.; Bouldin, Donald W. (1979). + `"A Cluster Separation Measure" + `__. + IEEE Transactions on Pattern Analysis and Machine Intelligence. + PAMI-1 (2): 224-227 + + Examples + -------- + >>> from sklearn.metrics import davies_bouldin_score + >>> X = [[0, 1], [1, 1], [3, 4]] + >>> labels = [0, 0, 1] + >>> davies_bouldin_score(X, labels) + 0.12... 
+ """ + X, labels = check_X_y(X, labels) + le = LabelEncoder() + labels = le.fit_transform(labels) + n_samples, _ = X.shape + n_labels = len(le.classes_) + check_number_of_labels(n_labels, n_samples) + + intra_dists = np.zeros(n_labels) + centroids = np.zeros((n_labels, len(X[0])), dtype=float) + for k in range(n_labels): + cluster_k = _safe_indexing(X, labels == k) + centroid = cluster_k.mean(axis=0) + centroids[k] = centroid + intra_dists[k] = np.average(pairwise_distances(cluster_k, [centroid])) + + centroid_distances = pairwise_distances(centroids) + + if np.allclose(intra_dists, 0) or np.allclose(centroid_distances, 0): + return 0.0 + + centroid_distances[centroid_distances == 0] = np.inf + combined_intra_dists = intra_dists[:, None] + intra_dists + scores = np.max(combined_intra_dists / centroid_distances, axis=1) + return np.mean(scores) diff --git a/venv/lib/python3.10/site-packages/sklearn/metrics/pairwise.py b/venv/lib/python3.10/site-packages/sklearn/metrics/pairwise.py new file mode 100644 index 0000000000000000000000000000000000000000..e1329123ccbe249fa74b226dc465ede35fc49aa5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/metrics/pairwise.py @@ -0,0 +1,2522 @@ +# Authors: Alexandre Gramfort +# Mathieu Blondel +# Robert Layton +# Andreas Mueller +# Philippe Gervais +# Lars Buitinck +# Joel Nothman +# License: BSD 3 clause + +import itertools +import warnings +from functools import partial +from numbers import Integral, Real + +import numpy as np +from joblib import effective_n_jobs +from scipy.sparse import csr_matrix, issparse +from scipy.spatial import distance + +from .. import config_context +from ..exceptions import DataConversionWarning +from ..preprocessing import normalize +from ..utils import ( + check_array, + gen_batches, + gen_even_slices, + get_chunk_n_rows, + is_scalar_nan, +) +from ..utils._mask import _get_mask +from ..utils._param_validation import ( + Hidden, + Interval, + MissingValues, + Options, + StrOptions, + validate_params, +) +from ..utils.extmath import row_norms, safe_sparse_dot +from ..utils.fixes import parse_version, sp_base_version +from ..utils.parallel import Parallel, delayed +from ..utils.validation import _num_samples, check_non_negative +from ._pairwise_distances_reduction import ArgKmin +from ._pairwise_fast import _chi2_kernel_fast, _sparse_manhattan + + +# Utility Functions +def _return_float_dtype(X, Y): + """ + 1. If dtype of X and Y is float32, then dtype float32 is returned. + 2. Else dtype float is returned. + """ + if not issparse(X) and not isinstance(X, np.ndarray): + X = np.asarray(X) + + if Y is None: + Y_dtype = X.dtype + elif not issparse(Y) and not isinstance(Y, np.ndarray): + Y = np.asarray(Y) + Y_dtype = Y.dtype + else: + Y_dtype = Y.dtype + + if X.dtype == Y_dtype == np.float32: + dtype = np.float32 + else: + dtype = float + + return X, Y, dtype + + +def check_pairwise_arrays( + X, + Y, + *, + precomputed=False, + dtype=None, + accept_sparse="csr", + force_all_finite=True, + copy=False, +): + """Set X and Y appropriately and checks inputs. + + If Y is None, it is set as a pointer to X (i.e. not a copy). + If Y is given, this does not happen. + All distance metrics should use this function first to assert that the + given parameters are correct and safe to use. + + Specifically, this function first ensures that both X and Y are arrays, + then checks that they are at least two dimensional while ensuring that + their elements are floats (or dtype if provided). 
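# --- Illustrative sketch (editor's note; not part of the scikit-learn sources above) ---
# Hand-computes the Davies-Bouldin index for a tiny two-cluster data set and checks
# it against davies_bouldin_score, mirroring the intra_dists / centroid_distances
# logic of the implementation above.
import numpy as np
from sklearn.metrics import davies_bouldin_score
from sklearn.metrics.pairwise import pairwise_distances

X = np.array([[0.0, 0.0], [0.0, 1.0], [5.0, 5.0], [5.0, 6.0]])
labels = np.array([0, 0, 1, 1])

centroids = np.array([X[labels == k].mean(axis=0) for k in (0, 1)])
intra = np.array(
    [pairwise_distances(X[labels == k], centroids[k][None, :]).mean() for k in (0, 1)]
)
centroid_dist = pairwise_distances(centroids)
np.fill_diagonal(centroid_dist, np.inf)  # exclude each cluster's comparison with itself
ratios = (intra[:, None] + intra[None, :]) / centroid_dist
manual = np.max(ratios, axis=1).mean()
assert np.isclose(manual, davies_bouldin_score(X, labels))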
Finally, the function + checks that the size of the second dimension of the two arrays is equal, or + the equivalent check for a precomputed distance matrix. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples_X, n_features) + + Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features) + + precomputed : bool, default=False + True if X is to be treated as precomputed distances to the samples in + Y. + + dtype : str, type, list of type, default=None + Data type required for X and Y. If None, the dtype will be an + appropriate float type selected by _return_float_dtype. + + .. versionadded:: 0.18 + + accept_sparse : str, bool or list/tuple of str, default='csr' + String[s] representing allowed sparse matrix formats, such as 'csc', + 'csr', etc. If the input is sparse but not in the allowed format, + it will be converted to the first listed format. True allows the input + to be any format. False means that a sparse matrix input will + raise an error. + + force_all_finite : bool or 'allow-nan', default=True + Whether to raise an error on np.inf, np.nan, pd.NA in array. The + possibilities are: + + - True: Force all values of array to be finite. + - False: accepts np.inf, np.nan, pd.NA in array. + - 'allow-nan': accepts only np.nan and pd.NA values in array. Values + cannot be infinite. + + .. versionadded:: 0.22 + ``force_all_finite`` accepts the string ``'allow-nan'``. + + .. versionchanged:: 0.23 + Accepts `pd.NA` and converts it into `np.nan`. + + copy : bool, default=False + Whether a forced copy will be triggered. If copy=False, a copy might + be triggered by a conversion. + + .. versionadded:: 0.22 + + Returns + ------- + safe_X : {array-like, sparse matrix} of shape (n_samples_X, n_features) + An array equal to X, guaranteed to be a numpy array. + + safe_Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features) + An array equal to Y if Y was not None, guaranteed to be a numpy array. + If Y was None, safe_Y will be a pointer to X. + """ + X, Y, dtype_float = _return_float_dtype(X, Y) + + estimator = "check_pairwise_arrays" + if dtype is None: + dtype = dtype_float + + if Y is X or Y is None: + X = Y = check_array( + X, + accept_sparse=accept_sparse, + dtype=dtype, + copy=copy, + force_all_finite=force_all_finite, + estimator=estimator, + ) + else: + X = check_array( + X, + accept_sparse=accept_sparse, + dtype=dtype, + copy=copy, + force_all_finite=force_all_finite, + estimator=estimator, + ) + Y = check_array( + Y, + accept_sparse=accept_sparse, + dtype=dtype, + copy=copy, + force_all_finite=force_all_finite, + estimator=estimator, + ) + + if precomputed: + if X.shape[1] != Y.shape[0]: + raise ValueError( + "Precomputed metric requires shape " + "(n_queries, n_indexed). Got (%d, %d) " + "for %d indexed." % (X.shape[0], X.shape[1], Y.shape[0]) + ) + elif X.shape[1] != Y.shape[1]: + raise ValueError( + "Incompatible dimension for X and Y matrices: " + "X.shape[1] == %d while Y.shape[1] == %d" % (X.shape[1], Y.shape[1]) + ) + + return X, Y + + +def check_paired_arrays(X, Y): + """Set X and Y appropriately and checks inputs for paired distances. + + All paired distance metrics should use this function first to assert that + the given parameters are correct and safe to use. + + Specifically, this function first ensures that both X and Y are arrays, + then checks that they are at least two dimensional while ensuring that + their elements are floats. Finally, the function checks that the size + of the dimensions of the two arrays are equal. 
+ + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples_X, n_features) + + Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features) + + Returns + ------- + safe_X : {array-like, sparse matrix} of shape (n_samples_X, n_features) + An array equal to X, guaranteed to be a numpy array. + + safe_Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features) + An array equal to Y if Y was not None, guaranteed to be a numpy array. + If Y was None, safe_Y will be a pointer to X. + """ + X, Y = check_pairwise_arrays(X, Y) + if X.shape != Y.shape: + raise ValueError( + "X and Y should be of same shape. They were respectively %r and %r long." + % (X.shape, Y.shape) + ) + return X, Y + + +# Pairwise distances +@validate_params( + { + "X": ["array-like", "sparse matrix"], + "Y": ["array-like", "sparse matrix", None], + "Y_norm_squared": ["array-like", None], + "squared": ["boolean"], + "X_norm_squared": ["array-like", None], + }, + prefer_skip_nested_validation=True, +) +def euclidean_distances( + X, Y=None, *, Y_norm_squared=None, squared=False, X_norm_squared=None +): + """ + Compute the distance matrix between each pair from a vector array X and Y. + + For efficiency reasons, the euclidean distance between a pair of row + vector x and y is computed as:: + + dist(x, y) = sqrt(dot(x, x) - 2 * dot(x, y) + dot(y, y)) + + This formulation has two advantages over other ways of computing distances. + First, it is computationally efficient when dealing with sparse data. + Second, if one argument varies but the other remains unchanged, then + `dot(x, x)` and/or `dot(y, y)` can be pre-computed. + + However, this is not the most precise way of doing this computation, + because this equation potentially suffers from "catastrophic cancellation". + Also, the distance matrix returned by this function may not be exactly + symmetric as required by, e.g., ``scipy.spatial.distance`` functions. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples_X, n_features) + An array where each row is a sample and each column is a feature. + + Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features), \ + default=None + An array where each row is a sample and each column is a feature. + If `None`, method uses `Y=X`. + + Y_norm_squared : array-like of shape (n_samples_Y,) or (n_samples_Y, 1) \ + or (1, n_samples_Y), default=None + Pre-computed dot-products of vectors in Y (e.g., + ``(Y**2).sum(axis=1)``) + May be ignored in some cases, see the note below. + + squared : bool, default=False + Return squared Euclidean distances. + + X_norm_squared : array-like of shape (n_samples_X,) or (n_samples_X, 1) \ + or (1, n_samples_X), default=None + Pre-computed dot-products of vectors in X (e.g., + ``(X**2).sum(axis=1)``) + May be ignored in some cases, see the note below. + + Returns + ------- + distances : ndarray of shape (n_samples_X, n_samples_Y) + Returns the distances between the row vectors of `X` + and the row vectors of `Y`. + + See Also + -------- + paired_distances : Distances between pairs of elements of X and Y. + + Notes + ----- + To achieve a better accuracy, `X_norm_squared` and `Y_norm_squared` may be + unused if they are passed as `np.float32`. 
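# --- Illustrative sketch (editor's note; not part of the scikit-learn sources above) ---
# Verifies the quadratic expansion used by euclidean_distances,
#   ||x - y||^2 = dot(x, x) - 2 * dot(x, y) + dot(y, y),
# against a brute-force computation on small random arrays.
import numpy as np
from sklearn.metrics.pairwise import euclidean_distances

rng = np.random.RandomState(0)
X = rng.rand(4, 3)
Y = rng.rand(5, 3)

XX = (X ** 2).sum(axis=1)[:, None]
YY = (Y ** 2).sum(axis=1)[None, :]
expanded = np.sqrt(np.maximum(XX - 2 * X @ Y.T + YY, 0))
brute = np.sqrt(((X[:, None, :] - Y[None, :, :]) ** 2).sum(axis=-1))

assert np.allclose(expanded, euclidean_distances(X, Y))
assert np.allclose(expanded, brute)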
+ + Examples + -------- + >>> from sklearn.metrics.pairwise import euclidean_distances + >>> X = [[0, 1], [1, 1]] + >>> # distance between rows of X + >>> euclidean_distances(X, X) + array([[0., 1.], + [1., 0.]]) + >>> # get distance to origin + >>> euclidean_distances(X, [[0, 0]]) + array([[1. ], + [1.41421356]]) + """ + X, Y = check_pairwise_arrays(X, Y) + + if X_norm_squared is not None: + X_norm_squared = check_array(X_norm_squared, ensure_2d=False) + original_shape = X_norm_squared.shape + if X_norm_squared.shape == (X.shape[0],): + X_norm_squared = X_norm_squared.reshape(-1, 1) + if X_norm_squared.shape == (1, X.shape[0]): + X_norm_squared = X_norm_squared.T + if X_norm_squared.shape != (X.shape[0], 1): + raise ValueError( + f"Incompatible dimensions for X of shape {X.shape} and " + f"X_norm_squared of shape {original_shape}." + ) + + if Y_norm_squared is not None: + Y_norm_squared = check_array(Y_norm_squared, ensure_2d=False) + original_shape = Y_norm_squared.shape + if Y_norm_squared.shape == (Y.shape[0],): + Y_norm_squared = Y_norm_squared.reshape(1, -1) + if Y_norm_squared.shape == (Y.shape[0], 1): + Y_norm_squared = Y_norm_squared.T + if Y_norm_squared.shape != (1, Y.shape[0]): + raise ValueError( + f"Incompatible dimensions for Y of shape {Y.shape} and " + f"Y_norm_squared of shape {original_shape}." + ) + + return _euclidean_distances(X, Y, X_norm_squared, Y_norm_squared, squared) + + +def _euclidean_distances(X, Y, X_norm_squared=None, Y_norm_squared=None, squared=False): + """Computational part of euclidean_distances + + Assumes inputs are already checked. + + If norms are passed as float32, they are unused. If arrays are passed as + float32, norms needs to be recomputed on upcast chunks. + TODO: use a float64 accumulator in row_norms to avoid the latter. + """ + if X_norm_squared is not None and X_norm_squared.dtype != np.float32: + XX = X_norm_squared.reshape(-1, 1) + elif X.dtype != np.float32: + XX = row_norms(X, squared=True)[:, np.newaxis] + else: + XX = None + + if Y is X: + YY = None if XX is None else XX.T + else: + if Y_norm_squared is not None and Y_norm_squared.dtype != np.float32: + YY = Y_norm_squared.reshape(1, -1) + elif Y.dtype != np.float32: + YY = row_norms(Y, squared=True)[np.newaxis, :] + else: + YY = None + + if X.dtype == np.float32 or Y.dtype == np.float32: + # To minimize precision issues with float32, we compute the distance + # matrix on chunks of X and Y upcast to float64 + distances = _euclidean_distances_upcast(X, XX, Y, YY) + else: + # if dtype is already float64, no need to chunk and upcast + distances = -2 * safe_sparse_dot(X, Y.T, dense_output=True) + distances += XX + distances += YY + np.maximum(distances, 0, out=distances) + + # Ensure that distances between vectors and themselves are set to 0.0. + # This may not be the case due to floating point rounding errors. + if X is Y: + np.fill_diagonal(distances, 0) + + return distances if squared else np.sqrt(distances, out=distances) + + +@validate_params( + { + "X": ["array-like"], + "Y": ["array-like", None], + "squared": ["boolean"], + "missing_values": [MissingValues(numeric_only=True)], + "copy": ["boolean"], + }, + prefer_skip_nested_validation=True, +) +def nan_euclidean_distances( + X, Y=None, *, squared=False, missing_values=np.nan, copy=True +): + """Calculate the euclidean distances in the presence of missing values. + + Compute the euclidean distance between each pair of samples in X and Y, + where Y=X is assumed if Y=None. 
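# --- Illustrative sketch (editor's note; not part of the scikit-learn sources above) ---
# When one argument stays fixed across many calls, its squared row norms can be
# precomputed once and passed via Y_norm_squared, as the docstring above suggests.
# The reference set Y here is hypothetical toy data.
import numpy as np
from sklearn.metrics.pairwise import euclidean_distances

rng = np.random.RandomState(42)
X = rng.rand(100, 8)
Y = rng.rand(20, 8)            # e.g. a fixed set of reference points
YY = (Y ** 2).sum(axis=1)      # precomputed once, reused for every query batch

d_default = euclidean_distances(X, Y)
d_cached = euclidean_distances(X, Y, Y_norm_squared=YY)
assert np.allclose(d_default, d_cached)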
When calculating the distance between a + pair of samples, this formulation ignores feature coordinates with a + missing value in either sample and scales up the weight of the remaining + coordinates: + + dist(x,y) = sqrt(weight * sq. distance from present coordinates) + where, + weight = Total # of coordinates / # of present coordinates + + For example, the distance between ``[3, na, na, 6]`` and ``[1, na, 4, 5]`` + is: + + .. math:: + \\sqrt{\\frac{4}{2}((3-1)^2 + (6-5)^2)} + + If all the coordinates are missing or if there are no common present + coordinates then NaN is returned for that pair. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.22 + + Parameters + ---------- + X : array-like of shape (n_samples_X, n_features) + An array where each row is a sample and each column is a feature. + + Y : array-like of shape (n_samples_Y, n_features), default=None + An array where each row is a sample and each column is a feature. + If `None`, method uses `Y=X`. + + squared : bool, default=False + Return squared Euclidean distances. + + missing_values : np.nan, float or int, default=np.nan + Representation of missing value. + + copy : bool, default=True + Make and use a deep copy of X and Y (if Y exists). + + Returns + ------- + distances : ndarray of shape (n_samples_X, n_samples_Y) + Returns the distances between the row vectors of `X` + and the row vectors of `Y`. + + See Also + -------- + paired_distances : Distances between pairs of elements of X and Y. + + References + ---------- + * John K. Dixon, "Pattern Recognition with Partly Missing Data", + IEEE Transactions on Systems, Man, and Cybernetics, Volume: 9, Issue: + 10, pp. 617 - 621, Oct. 1979. + http://ieeexplore.ieee.org/abstract/document/4310090/ + + Examples + -------- + >>> from sklearn.metrics.pairwise import nan_euclidean_distances + >>> nan = float("NaN") + >>> X = [[0, 1], [1, nan]] + >>> nan_euclidean_distances(X, X) # distance between rows of X + array([[0. , 1.41421356], + [1.41421356, 0. ]]) + + >>> # get distance to origin + >>> nan_euclidean_distances(X, [[0, 0]]) + array([[1. ], + [1.41421356]]) + """ + + force_all_finite = "allow-nan" if is_scalar_nan(missing_values) else True + X, Y = check_pairwise_arrays( + X, Y, accept_sparse=False, force_all_finite=force_all_finite, copy=copy + ) + # Get missing mask for X + missing_X = _get_mask(X, missing_values) + + # Get missing mask for Y + missing_Y = missing_X if Y is X else _get_mask(Y, missing_values) + + # set missing values to zero + X[missing_X] = 0 + Y[missing_Y] = 0 + + distances = euclidean_distances(X, Y, squared=True) + + # Adjust distances for missing values + XX = X * X + YY = Y * Y + distances -= np.dot(XX, missing_Y.T) + distances -= np.dot(missing_X, YY.T) + + np.clip(distances, 0, None, out=distances) + + if X is Y: + # Ensure that distances between vectors and themselves are set to 0.0. + # This may not be the case due to floating point rounding errors. + np.fill_diagonal(distances, 0.0) + + present_X = 1 - missing_X + present_Y = present_X if Y is X else ~missing_Y + present_count = np.dot(present_X, present_Y.T) + distances[present_count == 0] = np.nan + # avoid divide by zero + np.maximum(1, present_count, out=present_count) + distances /= present_count + distances *= X.shape[1] + + if not squared: + np.sqrt(distances, out=distances) + + return distances + + +def _euclidean_distances_upcast(X, XX=None, Y=None, YY=None, batch_size=None): + """Euclidean distances between X and Y. + + Assumes X and Y have float32 dtype. 
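# --- Illustrative sketch (editor's note; not part of the scikit-learn sources above) ---
# Checks the missing-value rescaling described above on the docstring's own example:
# only coordinates present in *both* samples contribute, and the partial squared
# distance is scaled by (total coordinates / present coordinates).
import numpy as np
from sklearn.metrics.pairwise import nan_euclidean_distances

x = np.array([[3.0, np.nan, np.nan, 6.0]])
y = np.array([[1.0, np.nan, 4.0, 5.0]])

present = ~np.isnan(x[0]) & ~np.isnan(y[0])
partial_sq = np.sum((x[0, present] - y[0, present]) ** 2)
manual = np.sqrt(x.shape[1] / present.sum() * partial_sq)  # sqrt(4/2 * 5) = sqrt(10)

assert np.isclose(manual, nan_euclidean_distances(x, y)[0, 0])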
+ Assumes XX and YY have float64 dtype or are None. + + X and Y are upcast to float64 by chunks, which size is chosen to limit + memory increase by approximately 10% (at least 10MiB). + """ + n_samples_X = X.shape[0] + n_samples_Y = Y.shape[0] + n_features = X.shape[1] + + distances = np.empty((n_samples_X, n_samples_Y), dtype=np.float32) + + if batch_size is None: + x_density = X.nnz / np.prod(X.shape) if issparse(X) else 1 + y_density = Y.nnz / np.prod(Y.shape) if issparse(Y) else 1 + + # Allow 10% more memory than X, Y and the distance matrix take (at + # least 10MiB) + maxmem = max( + ( + (x_density * n_samples_X + y_density * n_samples_Y) * n_features + + (x_density * n_samples_X * y_density * n_samples_Y) + ) + / 10, + 10 * 2**17, + ) + + # The increase amount of memory in 8-byte blocks is: + # - x_density * batch_size * n_features (copy of chunk of X) + # - y_density * batch_size * n_features (copy of chunk of Y) + # - batch_size * batch_size (chunk of distance matrix) + # Hence x² + (xd+yd)kx = M, where x=batch_size, k=n_features, M=maxmem + # xd=x_density and yd=y_density + tmp = (x_density + y_density) * n_features + batch_size = (-tmp + np.sqrt(tmp**2 + 4 * maxmem)) / 2 + batch_size = max(int(batch_size), 1) + + x_batches = gen_batches(n_samples_X, batch_size) + + for i, x_slice in enumerate(x_batches): + X_chunk = X[x_slice].astype(np.float64) + if XX is None: + XX_chunk = row_norms(X_chunk, squared=True)[:, np.newaxis] + else: + XX_chunk = XX[x_slice] + + y_batches = gen_batches(n_samples_Y, batch_size) + + for j, y_slice in enumerate(y_batches): + if X is Y and j < i: + # when X is Y the distance matrix is symmetric so we only need + # to compute half of it. + d = distances[y_slice, x_slice].T + + else: + Y_chunk = Y[y_slice].astype(np.float64) + if YY is None: + YY_chunk = row_norms(Y_chunk, squared=True)[np.newaxis, :] + else: + YY_chunk = YY[:, y_slice] + + d = -2 * safe_sparse_dot(X_chunk, Y_chunk.T, dense_output=True) + d += XX_chunk + d += YY_chunk + + distances[x_slice, y_slice] = d.astype(np.float32, copy=False) + + return distances + + +def _argmin_min_reduce(dist, start): + # `start` is specified in the signature but not used. This is because the higher + # order `pairwise_distances_chunked` function needs reduction functions that are + # passed as argument to have a two arguments signature. + indices = dist.argmin(axis=1) + values = dist[np.arange(dist.shape[0]), indices] + return indices, values + + +def _argmin_reduce(dist, start): + # `start` is specified in the signature but not used. This is because the higher + # order `pairwise_distances_chunked` function needs reduction functions that are + # passed as argument to have a two arguments signature. 
+ return dist.argmin(axis=1) + + +_VALID_METRICS = [ + "euclidean", + "l2", + "l1", + "manhattan", + "cityblock", + "braycurtis", + "canberra", + "chebyshev", + "correlation", + "cosine", + "dice", + "hamming", + "jaccard", + "mahalanobis", + "matching", + "minkowski", + "rogerstanimoto", + "russellrao", + "seuclidean", + "sokalmichener", + "sokalsneath", + "sqeuclidean", + "yule", + "wminkowski", + "nan_euclidean", + "haversine", +] +if sp_base_version < parse_version("1.11"): # pragma: no cover + # Deprecated in SciPy 1.9 and removed in SciPy 1.11 + _VALID_METRICS += ["kulsinski"] +if sp_base_version < parse_version("1.9"): + # Deprecated in SciPy 1.0 and removed in SciPy 1.9 + _VALID_METRICS += ["matching"] + +_NAN_METRICS = ["nan_euclidean"] + + +@validate_params( + { + "X": ["array-like", "sparse matrix"], + "Y": ["array-like", "sparse matrix"], + "axis": [Options(Integral, {0, 1})], + "metric": [ + StrOptions(set(_VALID_METRICS).union(ArgKmin.valid_metrics())), + callable, + ], + "metric_kwargs": [dict, None], + }, + prefer_skip_nested_validation=False, # metric is not validated yet +) +def pairwise_distances_argmin_min( + X, Y, *, axis=1, metric="euclidean", metric_kwargs=None +): + """Compute minimum distances between one point and a set of points. + + This function computes for each row in X, the index of the row of Y which + is closest (according to the specified distance). The minimal distances are + also returned. + + This is mostly equivalent to calling: + + (pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis), + pairwise_distances(X, Y=Y, metric=metric).min(axis=axis)) + + but uses much less memory, and is faster for large arrays. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples_X, n_features) + Array containing points. + + Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features) + Array containing points. + + axis : int, default=1 + Axis along which the argmin and distances are to be computed. + + metric : str or callable, default='euclidean' + Metric to use for distance computation. Any metric from scikit-learn + or scipy.spatial.distance can be used. + + If metric is a callable function, it is called on each + pair of instances (rows) and the resulting value recorded. The callable + should take two arrays as input and return one value indicating the + distance between them. This works for Scipy's metrics, but is less + efficient than passing the metric name as a string. + + Distance matrices are not supported. + + Valid values for metric are: + + - from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2', + 'manhattan'] + + - from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev', + 'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', + 'mahalanobis', 'minkowski', 'rogerstanimoto', 'russellrao', + 'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean', + 'yule'] + + See the documentation for scipy.spatial.distance for details on these + metrics. + + .. note:: + `'kulsinski'` is deprecated from SciPy 1.9 and will be removed in SciPy 1.11. + + .. note:: + `'matching'` has been removed in SciPy 1.9 (use `'hamming'` instead). + + metric_kwargs : dict, default=None + Keyword arguments to pass to specified metric function. + + Returns + ------- + argmin : ndarray + Y[argmin[i], :] is the row in Y that is closest to X[i, :]. + + distances : ndarray + The array of minimum distances. `distances[i]` is the distance between + the i-th row in X and the argmin[i]-th row in Y. 
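# --- Illustrative sketch (editor's note; not part of the scikit-learn sources above) ---
# Confirms that pairwise_distances_argmin_min matches the memory-hungry formulation
# quoted in the docstring above (argmin / min over the full distance matrix).
import numpy as np
from sklearn.metrics.pairwise import pairwise_distances, pairwise_distances_argmin_min

rng = np.random.RandomState(0)
X = rng.rand(6, 3)
Y = rng.rand(4, 3)

argmin, dists = pairwise_distances_argmin_min(X, Y)
D = pairwise_distances(X, Y)
assert np.array_equal(argmin, D.argmin(axis=1))
assert np.allclose(dists, D.min(axis=1))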
+ + See Also + -------- + pairwise_distances : Distances between every pair of samples of X and Y. + pairwise_distances_argmin : Same as `pairwise_distances_argmin_min` but only + returns the argmins. + + Examples + -------- + >>> from sklearn.metrics.pairwise import pairwise_distances_argmin_min + >>> X = [[0, 0, 0], [1, 1, 1]] + >>> Y = [[1, 0, 0], [1, 1, 0]] + >>> argmin, distances = pairwise_distances_argmin_min(X, Y) + >>> argmin + array([0, 1]) + >>> distances + array([1., 1.]) + """ + X, Y = check_pairwise_arrays(X, Y) + + if axis == 0: + X, Y = Y, X + + if metric_kwargs is None: + metric_kwargs = {} + + if ArgKmin.is_usable_for(X, Y, metric): + # This is an adaptor for one "sqeuclidean" specification. + # For this backend, we can directly use "sqeuclidean". + if metric_kwargs.get("squared", False) and metric == "euclidean": + metric = "sqeuclidean" + metric_kwargs = {} + + values, indices = ArgKmin.compute( + X=X, + Y=Y, + k=1, + metric=metric, + metric_kwargs=metric_kwargs, + strategy="auto", + return_distance=True, + ) + values = values.flatten() + indices = indices.flatten() + else: + # Joblib-based backend, which is used when user-defined callable + # are passed for metric. + + # This won't be used in the future once PairwiseDistancesReductions support: + # - DistanceMetrics which work on supposedly binary data + # - CSR-dense and dense-CSR case if 'euclidean' in metric. + + # Turn off check for finiteness because this is costly and because arrays + # have already been validated. + with config_context(assume_finite=True): + indices, values = zip( + *pairwise_distances_chunked( + X, Y, reduce_func=_argmin_min_reduce, metric=metric, **metric_kwargs + ) + ) + indices = np.concatenate(indices) + values = np.concatenate(values) + + return indices, values + + +@validate_params( + { + "X": ["array-like", "sparse matrix"], + "Y": ["array-like", "sparse matrix"], + "axis": [Options(Integral, {0, 1})], + "metric": [ + StrOptions(set(_VALID_METRICS).union(ArgKmin.valid_metrics())), + callable, + ], + "metric_kwargs": [dict, None], + }, + prefer_skip_nested_validation=False, # metric is not validated yet +) +def pairwise_distances_argmin(X, Y, *, axis=1, metric="euclidean", metric_kwargs=None): + """Compute minimum distances between one point and a set of points. + + This function computes for each row in X, the index of the row of Y which + is closest (according to the specified distance). + + This is mostly equivalent to calling: + + pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis) + + but uses much less memory, and is faster for large arrays. + + This function works with dense 2D arrays only. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples_X, n_features) + Array containing points. + + Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features) + Arrays containing points. + + axis : int, default=1 + Axis along which the argmin and distances are to be computed. + + metric : str or callable, default="euclidean" + Metric to use for distance computation. Any metric from scikit-learn + or scipy.spatial.distance can be used. + + If metric is a callable function, it is called on each + pair of instances (rows) and the resulting value recorded. The callable + should take two arrays as input and return one value indicating the + distance between them. This works for Scipy's metrics, but is less + efficient than passing the metric name as a string. + + Distance matrices are not supported. 
+ + Valid values for metric are: + + - from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2', + 'manhattan'] + + - from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev', + 'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', + 'mahalanobis', 'minkowski', 'rogerstanimoto', 'russellrao', + 'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean', + 'yule'] + + See the documentation for scipy.spatial.distance for details on these + metrics. + + .. note:: + `'kulsinski'` is deprecated from SciPy 1.9 and will be removed in SciPy 1.11. + + .. note:: + `'matching'` has been removed in SciPy 1.9 (use `'hamming'` instead). + + metric_kwargs : dict, default=None + Keyword arguments to pass to specified metric function. + + Returns + ------- + argmin : numpy.ndarray + Y[argmin[i], :] is the row in Y that is closest to X[i, :]. + + See Also + -------- + pairwise_distances : Distances between every pair of samples of X and Y. + pairwise_distances_argmin_min : Same as `pairwise_distances_argmin` but also + returns the distances. + + Examples + -------- + >>> from sklearn.metrics.pairwise import pairwise_distances_argmin + >>> X = [[0, 0, 0], [1, 1, 1]] + >>> Y = [[1, 0, 0], [1, 1, 0]] + >>> pairwise_distances_argmin(X, Y) + array([0, 1]) + """ + if metric_kwargs is None: + metric_kwargs = {} + + X, Y = check_pairwise_arrays(X, Y) + + if axis == 0: + X, Y = Y, X + + if metric_kwargs is None: + metric_kwargs = {} + + if ArgKmin.is_usable_for(X, Y, metric): + # This is an adaptor for one "sqeuclidean" specification. + # For this backend, we can directly use "sqeuclidean". + if metric_kwargs.get("squared", False) and metric == "euclidean": + metric = "sqeuclidean" + metric_kwargs = {} + + indices = ArgKmin.compute( + X=X, + Y=Y, + k=1, + metric=metric, + metric_kwargs=metric_kwargs, + strategy="auto", + return_distance=False, + ) + indices = indices.flatten() + else: + # Joblib-based backend, which is used when user-defined callable + # are passed for metric. + + # This won't be used in the future once PairwiseDistancesReductions support: + # - DistanceMetrics which work on supposedly binary data + # - CSR-dense and dense-CSR case if 'euclidean' in metric. + + # Turn off check for finiteness because this is costly and because arrays + # have already been validated. + with config_context(assume_finite=True): + indices = np.concatenate( + list( + # This returns a np.ndarray generator whose arrays we need + # to flatten into one. + pairwise_distances_chunked( + X, Y, reduce_func=_argmin_reduce, metric=metric, **metric_kwargs + ) + ) + ) + + return indices + + +@validate_params( + {"X": ["array-like", "sparse matrix"], "Y": ["array-like", "sparse matrix", None]}, + prefer_skip_nested_validation=True, +) +def haversine_distances(X, Y=None): + """Compute the Haversine distance between samples in X and Y. + + The Haversine (or great circle) distance is the angular distance between + two points on the surface of a sphere. The first coordinate of each point + is assumed to be the latitude, the second is the longitude, given + in radians. The dimension of the data must be 2. + + .. math:: + D(x, y) = 2\\arcsin[\\sqrt{\\sin^2((x_{lat} - y_{lat}) / 2) + + \\cos(x_{lat})\\cos(y_{lat})\\ + sin^2((x_{lon} - y_{lon}) / 2)}] + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples_X, 2) + A feature array. + + Y : {array-like, sparse matrix} of shape (n_samples_Y, 2), default=None + An optional second feature array. If `None`, uses `Y=X`. 
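# --- Illustrative sketch (editor's note; not part of the scikit-learn sources above) ---
# A common use of pairwise_distances_argmin is assigning points to their nearest
# centroid without materialising the full distance matrix.  The points and
# centroids below are hypothetical toy values.
import numpy as np
from sklearn.metrics.pairwise import pairwise_distances_argmin

points = np.array([[0.1, 0.0], [0.9, 1.1], [5.0, 5.2], [4.8, 4.9]])
centroids = np.array([[0.0, 0.0], [5.0, 5.0]])

assignments = pairwise_distances_argmin(points, centroids)
# points 0 and 1 fall closest to centroid 0, points 2 and 3 to centroid 1
assert np.array_equal(assignments, [0, 0, 1, 1])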
+ + Returns + ------- + distances : ndarray of shape (n_samples_X, n_samples_Y) + The distance matrix. + + Notes + ----- + As the Earth is nearly spherical, the haversine formula provides a good + approximation of the distance between two points of the Earth surface, with + a less than 1% error on average. + + Examples + -------- + We want to calculate the distance between the Ezeiza Airport + (Buenos Aires, Argentina) and the Charles de Gaulle Airport (Paris, + France). + + >>> from sklearn.metrics.pairwise import haversine_distances + >>> from math import radians + >>> bsas = [-34.83333, -58.5166646] + >>> paris = [49.0083899664, 2.53844117956] + >>> bsas_in_radians = [radians(_) for _ in bsas] + >>> paris_in_radians = [radians(_) for _ in paris] + >>> result = haversine_distances([bsas_in_radians, paris_in_radians]) + >>> result * 6371000/1000 # multiply by Earth radius to get kilometers + array([[ 0. , 11099.54035582], + [11099.54035582, 0. ]]) + """ + from ..metrics import DistanceMetric + + return DistanceMetric.get_metric("haversine").pairwise(X, Y) + + +@validate_params( + { + "X": ["array-like", "sparse matrix"], + "Y": ["array-like", "sparse matrix", None], + }, + prefer_skip_nested_validation=True, +) +def manhattan_distances(X, Y=None): + """Compute the L1 distances between the vectors in X and Y. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples_X, n_features) + An array where each row is a sample and each column is a feature. + + Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features), default=None + An array where each row is a sample and each column is a feature. + If `None`, method uses `Y=X`. + + Returns + ------- + distances : ndarray of shape (n_samples_X, n_samples_Y) + Pairwise L1 distances. + + Notes + ----- + When X and/or Y are CSR sparse matrices and they are not already + in canonical format, this function modifies them in-place to + make them canonical. + + Examples + -------- + >>> from sklearn.metrics.pairwise import manhattan_distances + >>> manhattan_distances([[3]], [[3]]) + array([[0.]]) + >>> manhattan_distances([[3]], [[2]]) + array([[1.]]) + >>> manhattan_distances([[2]], [[3]]) + array([[1.]]) + >>> manhattan_distances([[1, 2], [3, 4]],\ + [[1, 2], [0, 3]]) + array([[0., 2.], + [4., 4.]]) + """ + X, Y = check_pairwise_arrays(X, Y) + + if issparse(X) or issparse(Y): + X = csr_matrix(X, copy=False) + Y = csr_matrix(Y, copy=False) + X.sum_duplicates() # this also sorts indices in-place + Y.sum_duplicates() + D = np.zeros((X.shape[0], Y.shape[0])) + _sparse_manhattan(X.data, X.indices, X.indptr, Y.data, Y.indices, Y.indptr, D) + return D + + return distance.cdist(X, Y, "cityblock") + + +@validate_params( + { + "X": ["array-like", "sparse matrix"], + "Y": ["array-like", "sparse matrix", None], + }, + prefer_skip_nested_validation=True, +) +def cosine_distances(X, Y=None): + """Compute cosine distance between samples in X and Y. + + Cosine distance is defined as 1.0 minus the cosine similarity. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples_X, n_features) + Matrix `X`. + + Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features), \ + default=None + Matrix `Y`. + + Returns + ------- + distances : ndarray of shape (n_samples_X, n_samples_Y) + Returns the cosine distance between samples in X and Y. 
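# --- Illustrative sketch (editor's note; not part of the scikit-learn sources above) ---
# manhattan_distances accepts both dense arrays and CSR matrices; both paths should
# agree with a brute-force L1 computation on small random data.
import numpy as np
from scipy.sparse import csr_matrix
from sklearn.metrics.pairwise import manhattan_distances

rng = np.random.RandomState(0)
X = rng.rand(4, 3)
Y = rng.rand(5, 3)

brute = np.abs(X[:, None, :] - Y[None, :, :]).sum(axis=-1)
assert np.allclose(brute, manhattan_distances(X, Y))
assert np.allclose(brute, manhattan_distances(csr_matrix(X), csr_matrix(Y)))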
+ + See Also + -------- + cosine_similarity : Compute cosine similarity between samples in X and Y. + scipy.spatial.distance.cosine : Dense matrices only. + + Examples + -------- + >>> from sklearn.metrics.pairwise import cosine_distances + >>> X = [[0, 0, 0], [1, 1, 1]] + >>> Y = [[1, 0, 0], [1, 1, 0]] + >>> cosine_distances(X, Y) + array([[1. , 1. ], + [0.42..., 0.18...]]) + """ + # 1.0 - cosine_similarity(X, Y) without copy + S = cosine_similarity(X, Y) + S *= -1 + S += 1 + np.clip(S, 0, 2, out=S) + if X is Y or Y is None: + # Ensure that distances between vectors and themselves are set to 0.0. + # This may not be the case due to floating point rounding errors. + S[np.diag_indices_from(S)] = 0.0 + return S + + +# Paired distances +@validate_params( + {"X": ["array-like", "sparse matrix"], "Y": ["array-like", "sparse matrix"]}, + prefer_skip_nested_validation=True, +) +def paired_euclidean_distances(X, Y): + """Compute the paired euclidean distances between X and Y. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Input array/matrix X. + + Y : {array-like, sparse matrix} of shape (n_samples, n_features) + Input array/matrix Y. + + Returns + ------- + distances : ndarray of shape (n_samples,) + Output array/matrix containing the calculated paired euclidean + distances. + + Examples + -------- + >>> from sklearn.metrics.pairwise import paired_euclidean_distances + >>> X = [[0, 0, 0], [1, 1, 1]] + >>> Y = [[1, 0, 0], [1, 1, 0]] + >>> paired_euclidean_distances(X, Y) + array([1., 1.]) + """ + X, Y = check_paired_arrays(X, Y) + return row_norms(X - Y) + + +@validate_params( + {"X": ["array-like", "sparse matrix"], "Y": ["array-like", "sparse matrix"]}, + prefer_skip_nested_validation=True, +) +def paired_manhattan_distances(X, Y): + """Compute the paired L1 distances between X and Y. + + Distances are calculated between (X[0], Y[0]), (X[1], Y[1]), ..., + (X[n_samples], Y[n_samples]). + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + An array-like where each row is a sample and each column is a feature. + + Y : {array-like, sparse matrix} of shape (n_samples, n_features) + An array-like where each row is a sample and each column is a feature. + + Returns + ------- + distances : ndarray of shape (n_samples,) + L1 paired distances between the row vectors of `X` + and the row vectors of `Y`. + + Examples + -------- + >>> from sklearn.metrics.pairwise import paired_manhattan_distances + >>> import numpy as np + >>> X = np.array([[1, 1, 0], [0, 1, 0], [0, 0, 1]]) + >>> Y = np.array([[0, 1, 0], [0, 0, 1], [0, 0, 0]]) + >>> paired_manhattan_distances(X, Y) + array([1., 2., 1.]) + """ + X, Y = check_paired_arrays(X, Y) + diff = X - Y + if issparse(diff): + diff.data = np.abs(diff.data) + return np.squeeze(np.array(diff.sum(axis=1))) + else: + return np.abs(diff).sum(axis=-1) + + +@validate_params( + {"X": ["array-like", "sparse matrix"], "Y": ["array-like", "sparse matrix"]}, + prefer_skip_nested_validation=True, +) +def paired_cosine_distances(X, Y): + """ + Compute the paired cosine distances between X and Y. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + An array where each row is a sample and each column is a feature. 
+ + Y : {array-like, sparse matrix} of shape (n_samples, n_features) + An array where each row is a sample and each column is a feature. + + Returns + ------- + distances : ndarray of shape (n_samples,) + Returns the distances between the row vectors of `X` + and the row vectors of `Y`, where `distances[i]` is the + distance between `X[i]` and `Y[i]`. + + Notes + ----- + The cosine distance is equivalent to the half the squared + euclidean distance if each sample is normalized to unit norm. + + Examples + -------- + >>> from sklearn.metrics.pairwise import paired_cosine_distances + >>> X = [[0, 0, 0], [1, 1, 1]] + >>> Y = [[1, 0, 0], [1, 1, 0]] + >>> paired_cosine_distances(X, Y) + array([0.5 , 0.18...]) + """ + X, Y = check_paired_arrays(X, Y) + return 0.5 * row_norms(normalize(X) - normalize(Y), squared=True) + + +PAIRED_DISTANCES = { + "cosine": paired_cosine_distances, + "euclidean": paired_euclidean_distances, + "l2": paired_euclidean_distances, + "l1": paired_manhattan_distances, + "manhattan": paired_manhattan_distances, + "cityblock": paired_manhattan_distances, +} + + +@validate_params( + { + "X": ["array-like"], + "Y": ["array-like"], + "metric": [StrOptions(set(PAIRED_DISTANCES)), callable], + }, + prefer_skip_nested_validation=True, +) +def paired_distances(X, Y, *, metric="euclidean", **kwds): + """ + Compute the paired distances between X and Y. + + Compute the distances between (X[0], Y[0]), (X[1], Y[1]), etc... + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : ndarray of shape (n_samples, n_features) + Array 1 for distance computation. + + Y : ndarray of shape (n_samples, n_features) + Array 2 for distance computation. + + metric : str or callable, default="euclidean" + The metric to use when calculating distance between instances in a + feature array. If metric is a string, it must be one of the options + specified in PAIRED_DISTANCES, including "euclidean", + "manhattan", or "cosine". + Alternatively, if metric is a callable function, it is called on each + pair of instances (rows) and the resulting value recorded. The callable + should take two arrays from `X` as input and return a value indicating + the distance between them. + + **kwds : dict + Unused parameters. + + Returns + ------- + distances : ndarray of shape (n_samples,) + Returns the distances between the row vectors of `X` + and the row vectors of `Y`. + + See Also + -------- + sklearn.metrics.pairwise_distances : Computes the distance between every pair of + samples. + + Examples + -------- + >>> from sklearn.metrics.pairwise import paired_distances + >>> X = [[0, 1], [1, 1]] + >>> Y = [[0, 1], [2, 1]] + >>> paired_distances(X, Y) + array([0., 1.]) + """ + + if metric in PAIRED_DISTANCES: + func = PAIRED_DISTANCES[metric] + return func(X, Y) + elif callable(metric): + # Check the matrix first (it is usually done by the metric) + X, Y = check_paired_arrays(X, Y) + distances = np.zeros(len(X)) + for i in range(len(X)): + distances[i] = metric(X[i], Y[i]) + return distances + + +# Kernels +@validate_params( + { + "X": ["array-like", "sparse matrix"], + "Y": ["array-like", "sparse matrix", None], + "dense_output": ["boolean"], + }, + prefer_skip_nested_validation=True, +) +def linear_kernel(X, Y=None, dense_output=True): + """ + Compute the linear kernel between X and Y. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples_X, n_features) + A feature array. 
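# --- Illustrative sketch (editor's note; not part of the scikit-learn sources above) ---
# The paired_* helpers compare rows position by position (one value per (X[i], Y[i])
# pair) rather than producing a full matrix, and paired_cosine_distances equals half
# the squared Euclidean distance of the L2-normalized rows, as noted above.
import numpy as np
from sklearn.metrics.pairwise import (
    euclidean_distances,
    paired_cosine_distances,
    paired_distances,
)
from sklearn.preprocessing import normalize

X = np.array([[0.0, 1.0], [1.0, 1.0], [2.0, 0.0]])
Y = np.array([[0.0, 1.0], [3.0, 1.0], [2.0, 4.0]])

assert np.allclose(paired_distances(X, Y), np.diag(euclidean_distances(X, Y)))
Xn, Yn = normalize(X), normalize(Y)
assert np.allclose(paired_cosine_distances(X, Y), 0.5 * ((Xn - Yn) ** 2).sum(axis=1))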
+ + Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features), default=None + An optional second feature array. If `None`, uses `Y=X`. + + dense_output : bool, default=True + Whether to return dense output even when the input is sparse. If + ``False``, the output is sparse if both input arrays are sparse. + + .. versionadded:: 0.20 + + Returns + ------- + kernel : ndarray of shape (n_samples_X, n_samples_Y) + The Gram matrix of the linear kernel, i.e. `X @ Y.T`. + + Examples + -------- + >>> from sklearn.metrics.pairwise import linear_kernel + >>> X = [[0, 0, 0], [1, 1, 1]] + >>> Y = [[1, 0, 0], [1, 1, 0]] + >>> linear_kernel(X, Y) + array([[0., 0.], + [1., 2.]]) + """ + X, Y = check_pairwise_arrays(X, Y) + return safe_sparse_dot(X, Y.T, dense_output=dense_output) + + +@validate_params( + { + "X": ["array-like", "sparse matrix"], + "Y": ["array-like", "sparse matrix", None], + "degree": [Interval(Real, 1, None, closed="left")], + "gamma": [ + Interval(Real, 0, None, closed="left"), + None, + Hidden(np.ndarray), + ], + "coef0": [Interval(Real, None, None, closed="neither")], + }, + prefer_skip_nested_validation=True, +) +def polynomial_kernel(X, Y=None, degree=3, gamma=None, coef0=1): + """ + Compute the polynomial kernel between X and Y. + + K(X, Y) = (gamma + coef0) ^ degree + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples_X, n_features) + A feature array. + + Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features), default=None + An optional second feature array. If `None`, uses `Y=X`. + + degree : float, default=3 + Kernel degree. + + gamma : float, default=None + Coefficient of the vector inner product. If None, defaults to 1.0 / n_features. + + coef0 : float, default=1 + Constant offset added to scaled inner product. + + Returns + ------- + kernel : ndarray of shape (n_samples_X, n_samples_Y) + The polynomial kernel. + + Examples + -------- + >>> from sklearn.metrics.pairwise import polynomial_kernel + >>> X = [[0, 0, 0], [1, 1, 1]] + >>> Y = [[1, 0, 0], [1, 1, 0]] + >>> polynomial_kernel(X, Y, degree=2) + array([[1. , 1. ], + [1.77..., 2.77...]]) + """ + X, Y = check_pairwise_arrays(X, Y) + if gamma is None: + gamma = 1.0 / X.shape[1] + + K = safe_sparse_dot(X, Y.T, dense_output=True) + K *= gamma + K += coef0 + K **= degree + return K + + +@validate_params( + { + "X": ["array-like", "sparse matrix"], + "Y": ["array-like", "sparse matrix", None], + "gamma": [ + Interval(Real, 0, None, closed="left"), + None, + Hidden(np.ndarray), + ], + "coef0": [Interval(Real, None, None, closed="neither")], + }, + prefer_skip_nested_validation=True, +) +def sigmoid_kernel(X, Y=None, gamma=None, coef0=1): + """Compute the sigmoid kernel between X and Y. + + K(X, Y) = tanh(gamma + coef0) + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples_X, n_features) + A feature array. + + Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features), default=None + An optional second feature array. If `None`, uses `Y=X`. + + gamma : float, default=None + Coefficient of the vector inner product. If None, defaults to 1.0 / n_features. + + coef0 : float, default=1 + Constant offset added to scaled inner product. + + Returns + ------- + kernel : ndarray of shape (n_samples_X, n_samples_Y) + Sigmoid kernel between two arrays. 
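# --- Illustrative sketch (editor's note; not part of the scikit-learn sources above) ---
# Reproduces the polynomial kernel computed above, (gamma * dot(x, y) + coef0) ** degree,
# with a direct NumPy expression on small random inputs.
import numpy as np
from sklearn.metrics.pairwise import polynomial_kernel

rng = np.random.RandomState(0)
X = rng.rand(4, 3)
Y = rng.rand(5, 3)
gamma, coef0, degree = 0.5, 1.0, 3

manual = (gamma * X @ Y.T + coef0) ** degree
assert np.allclose(manual, polynomial_kernel(X, Y, degree=degree, gamma=gamma, coef0=coef0))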
+ + Examples + -------- + >>> from sklearn.metrics.pairwise import sigmoid_kernel + >>> X = [[0, 0, 0], [1, 1, 1]] + >>> Y = [[1, 0, 0], [1, 1, 0]] + >>> sigmoid_kernel(X, Y) + array([[0.76..., 0.76...], + [0.87..., 0.93...]]) + """ + X, Y = check_pairwise_arrays(X, Y) + if gamma is None: + gamma = 1.0 / X.shape[1] + + K = safe_sparse_dot(X, Y.T, dense_output=True) + K *= gamma + K += coef0 + np.tanh(K, K) # compute tanh in-place + return K + + +@validate_params( + { + "X": ["array-like", "sparse matrix"], + "Y": ["array-like", "sparse matrix", None], + "gamma": [ + Interval(Real, 0, None, closed="left"), + None, + Hidden(np.ndarray), + ], + }, + prefer_skip_nested_validation=True, +) +def rbf_kernel(X, Y=None, gamma=None): + """Compute the rbf (gaussian) kernel between X and Y. + + K(x, y) = exp(-gamma ||x-y||^2) + + for each pair of rows x in X and y in Y. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples_X, n_features) + A feature array. + + Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features), default=None + An optional second feature array. If `None`, uses `Y=X`. + + gamma : float, default=None + If None, defaults to 1.0 / n_features. + + Returns + ------- + kernel : ndarray of shape (n_samples_X, n_samples_Y) + The RBF kernel. + + Examples + -------- + >>> from sklearn.metrics.pairwise import rbf_kernel + >>> X = [[0, 0, 0], [1, 1, 1]] + >>> Y = [[1, 0, 0], [1, 1, 0]] + >>> rbf_kernel(X, Y) + array([[0.71..., 0.51...], + [0.51..., 0.71...]]) + """ + X, Y = check_pairwise_arrays(X, Y) + if gamma is None: + gamma = 1.0 / X.shape[1] + + K = euclidean_distances(X, Y, squared=True) + K *= -gamma + np.exp(K, K) # exponentiate K in-place + return K + + +@validate_params( + { + "X": ["array-like", "sparse matrix"], + "Y": ["array-like", "sparse matrix", None], + "gamma": [ + Interval(Real, 0, None, closed="neither"), + Hidden(np.ndarray), + None, + ], + }, + prefer_skip_nested_validation=True, +) +def laplacian_kernel(X, Y=None, gamma=None): + """Compute the laplacian kernel between X and Y. + + The laplacian kernel is defined as:: + + K(x, y) = exp(-gamma ||x-y||_1) + + for each pair of rows x in X and y in Y. + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.17 + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples_X, n_features) + A feature array. + + Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features), default=None + An optional second feature array. If `None`, uses `Y=X`. + + gamma : float, default=None + If None, defaults to 1.0 / n_features. Otherwise it should be strictly positive. + + Returns + ------- + kernel : ndarray of shape (n_samples_X, n_samples_Y) + The kernel matrix. + + Examples + -------- + >>> from sklearn.metrics.pairwise import laplacian_kernel + >>> X = [[0, 0, 0], [1, 1, 1]] + >>> Y = [[1, 0, 0], [1, 1, 0]] + >>> laplacian_kernel(X, Y) + array([[0.71..., 0.51...], + [0.51..., 0.71...]]) + """ + X, Y = check_pairwise_arrays(X, Y) + if gamma is None: + gamma = 1.0 / X.shape[1] + + K = -gamma * manhattan_distances(X, Y) + np.exp(K, K) # exponentiate K in-place + return K + + +@validate_params( + { + "X": ["array-like", "sparse matrix"], + "Y": ["array-like", "sparse matrix", None], + "dense_output": ["boolean"], + }, + prefer_skip_nested_validation=True, +) +def cosine_similarity(X, Y=None, dense_output=True): + """Compute cosine similarity between samples in X and Y. 
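# --- Illustrative sketch (editor's note; not part of the scikit-learn sources above) ---
# The RBF kernel above is exp(-gamma * squared Euclidean distance); a quick
# cross-check against euclidean_distances(..., squared=True).
import numpy as np
from sklearn.metrics.pairwise import euclidean_distances, rbf_kernel

rng = np.random.RandomState(0)
X = rng.rand(4, 3)
Y = rng.rand(5, 3)
gamma = 0.7

manual = np.exp(-gamma * euclidean_distances(X, Y, squared=True))
assert np.allclose(manual, rbf_kernel(X, Y, gamma=gamma))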
+ + Cosine similarity, or the cosine kernel, computes similarity as the + normalized dot product of X and Y: + + K(X, Y) = / (||X||*||Y||) + + On L2-normalized data, this function is equivalent to linear_kernel. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples_X, n_features) + Input data. + + Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features), \ + default=None + Input data. If ``None``, the output will be the pairwise + similarities between all samples in ``X``. + + dense_output : bool, default=True + Whether to return dense output even when the input is sparse. If + ``False``, the output is sparse if both input arrays are sparse. + + .. versionadded:: 0.17 + parameter ``dense_output`` for dense output. + + Returns + ------- + similarities : ndarray of shape (n_samples_X, n_samples_Y) + Returns the cosine similarity between samples in X and Y. + + Examples + -------- + >>> from sklearn.metrics.pairwise import cosine_similarity + >>> X = [[0, 0, 0], [1, 1, 1]] + >>> Y = [[1, 0, 0], [1, 1, 0]] + >>> cosine_similarity(X, Y) + array([[0. , 0. ], + [0.57..., 0.81...]]) + """ + # to avoid recursive import + + X, Y = check_pairwise_arrays(X, Y) + + X_normalized = normalize(X, copy=True) + if X is Y: + Y_normalized = X_normalized + else: + Y_normalized = normalize(Y, copy=True) + + K = safe_sparse_dot(X_normalized, Y_normalized.T, dense_output=dense_output) + + return K + + +@validate_params( + {"X": ["array-like"], "Y": ["array-like", None]}, + prefer_skip_nested_validation=True, +) +def additive_chi2_kernel(X, Y=None): + """Compute the additive chi-squared kernel between observations in X and Y. + + The chi-squared kernel is computed between each pair of rows in X and Y. X + and Y have to be non-negative. This kernel is most commonly applied to + histograms. + + The chi-squared kernel is given by:: + + k(x, y) = -Sum [(x - y)^2 / (x + y)] + + It can be interpreted as a weighted difference per entry. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : array-like of shape (n_samples_X, n_features) + A feature array. + + Y : array-like of shape (n_samples_Y, n_features), default=None + An optional second feature array. If `None`, uses `Y=X`. + + Returns + ------- + kernel : ndarray of shape (n_samples_X, n_samples_Y) + The kernel matrix. + + See Also + -------- + chi2_kernel : The exponentiated version of the kernel, which is usually + preferable. + sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation + to this kernel. + + Notes + ----- + As the negative of a distance, this kernel is only conditionally positive + definite. + + References + ---------- + * Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C. 
+ Local features and kernels for classification of texture and object + categories: A comprehensive study + International Journal of Computer Vision 2007 + https://hal.archives-ouvertes.fr/hal-00171412/document + + Examples + -------- + >>> from sklearn.metrics.pairwise import additive_chi2_kernel + >>> X = [[0, 0, 0], [1, 1, 1]] + >>> Y = [[1, 0, 0], [1, 1, 0]] + >>> additive_chi2_kernel(X, Y) + array([[-1., -2.], + [-2., -1.]]) + """ + X, Y = check_pairwise_arrays(X, Y, accept_sparse=False) + if (X < 0).any(): + raise ValueError("X contains negative values.") + if Y is not X and (Y < 0).any(): + raise ValueError("Y contains negative values.") + + result = np.zeros((X.shape[0], Y.shape[0]), dtype=X.dtype) + _chi2_kernel_fast(X, Y, result) + return result + + +@validate_params( + { + "X": ["array-like"], + "Y": ["array-like", None], + "gamma": [Interval(Real, 0, None, closed="neither"), Hidden(np.ndarray)], + }, + prefer_skip_nested_validation=True, +) +def chi2_kernel(X, Y=None, gamma=1.0): + """Compute the exponential chi-squared kernel between X and Y. + + The chi-squared kernel is computed between each pair of rows in X and Y. X + and Y have to be non-negative. This kernel is most commonly applied to + histograms. + + The chi-squared kernel is given by:: + + k(x, y) = exp(-gamma Sum [(x - y)^2 / (x + y)]) + + It can be interpreted as a weighted difference per entry. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : array-like of shape (n_samples_X, n_features) + A feature array. + + Y : array-like of shape (n_samples_Y, n_features), default=None + An optional second feature array. If `None`, uses `Y=X`. + + gamma : float, default=1 + Scaling parameter of the chi2 kernel. + + Returns + ------- + kernel : ndarray of shape (n_samples_X, n_samples_Y) + The kernel matrix. + + See Also + -------- + additive_chi2_kernel : The additive version of this kernel. + sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation + to the additive version of this kernel. + + References + ---------- + * Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C. + Local features and kernels for classification of texture and object + categories: A comprehensive study + International Journal of Computer Vision 2007 + https://hal.archives-ouvertes.fr/hal-00171412/document + + Examples + -------- + >>> from sklearn.metrics.pairwise import chi2_kernel + >>> X = [[0, 0, 0], [1, 1, 1]] + >>> Y = [[1, 0, 0], [1, 1, 0]] + >>> chi2_kernel(X, Y) + array([[0.36..., 0.13...], + [0.13..., 0.36...]]) + """ + K = additive_chi2_kernel(X, Y) + K *= gamma + return np.exp(K, K) + + +# Helper functions - distance +PAIRWISE_DISTANCE_FUNCTIONS = { + # If updating this dictionary, update the doc in both distance_metrics() + # and also in pairwise_distances()! + "cityblock": manhattan_distances, + "cosine": cosine_distances, + "euclidean": euclidean_distances, + "haversine": haversine_distances, + "l2": euclidean_distances, + "l1": manhattan_distances, + "manhattan": manhattan_distances, + "precomputed": None, # HACK: precomputed is always allowed, never called + "nan_euclidean": nan_euclidean_distances, +} + + +def distance_metrics(): + """Valid metrics for pairwise_distances. + + This function simply returns the valid pairwise distance metrics. + It exists to allow for a description of the mapping for + each of the valid strings. 
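# --- Illustrative sketch (editor's note; not part of the scikit-learn sources above) ---
# Verifies the additive chi-squared kernel, k(x, y) = -sum((x - y)^2 / (x + y)), on
# strictly positive toy histograms so the denominator is never zero, and checks that
# chi2_kernel is its exponentiated form exp(gamma * k(x, y)).
import numpy as np
from sklearn.metrics.pairwise import additive_chi2_kernel, chi2_kernel

rng = np.random.RandomState(0)
X = rng.rand(3, 6) + 0.1
Y = rng.rand(4, 6) + 0.1

diff_sq = (X[:, None, :] - Y[None, :, :]) ** 2
denom = X[:, None, :] + Y[None, :, :]
manual = -(diff_sq / denom).sum(axis=-1)

assert np.allclose(manual, additive_chi2_kernel(X, Y))
assert np.allclose(np.exp(0.5 * manual), chi2_kernel(X, Y, gamma=0.5))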
+ + The valid distance metrics, and the function they map to, are: + + =============== ======================================== + metric Function + =============== ======================================== + 'cityblock' metrics.pairwise.manhattan_distances + 'cosine' metrics.pairwise.cosine_distances + 'euclidean' metrics.pairwise.euclidean_distances + 'haversine' metrics.pairwise.haversine_distances + 'l1' metrics.pairwise.manhattan_distances + 'l2' metrics.pairwise.euclidean_distances + 'manhattan' metrics.pairwise.manhattan_distances + 'nan_euclidean' metrics.pairwise.nan_euclidean_distances + =============== ======================================== + + Read more in the :ref:`User Guide `. + + Returns + ------- + distance_metrics : dict + Returns valid metrics for pairwise_distances. + """ + return PAIRWISE_DISTANCE_FUNCTIONS + + +def _dist_wrapper(dist_func, dist_matrix, slice_, *args, **kwargs): + """Write in-place to a slice of a distance matrix.""" + dist_matrix[:, slice_] = dist_func(*args, **kwargs) + + +def _parallel_pairwise(X, Y, func, n_jobs, **kwds): + """Break the pairwise matrix in n_jobs even slices + and compute them in parallel.""" + + if Y is None: + Y = X + X, Y, dtype = _return_float_dtype(X, Y) + + if effective_n_jobs(n_jobs) == 1: + return func(X, Y, **kwds) + + # enforce a threading backend to prevent data communication overhead + fd = delayed(_dist_wrapper) + ret = np.empty((X.shape[0], Y.shape[0]), dtype=dtype, order="F") + Parallel(backend="threading", n_jobs=n_jobs)( + fd(func, ret, s, X, Y[s], **kwds) + for s in gen_even_slices(_num_samples(Y), effective_n_jobs(n_jobs)) + ) + + if (X is Y or Y is None) and func is euclidean_distances: + # zeroing diagonal for euclidean norm. + # TODO: do it also for other norms. + np.fill_diagonal(ret, 0) + + return ret + + +def _pairwise_callable(X, Y, metric, force_all_finite=True, **kwds): + """Handle the callable case for pairwise_{distances,kernels}.""" + X, Y = check_pairwise_arrays(X, Y, force_all_finite=force_all_finite) + + if X is Y: + # Only calculate metric for upper triangle + out = np.zeros((X.shape[0], Y.shape[0]), dtype="float") + iterator = itertools.combinations(range(X.shape[0]), 2) + for i, j in iterator: + # scipy has not yet implemented 1D sparse slices; once implemented this can + # be removed and `arr[ind]` can be simply used. + x = X[[i], :] if issparse(X) else X[i] + y = Y[[j], :] if issparse(Y) else Y[j] + out[i, j] = metric(x, y, **kwds) + + # Make symmetric + # NB: out += out.T will produce incorrect results + out = out + out.T + + # Calculate diagonal + # NB: nonzero diagonals are allowed for both metrics and kernels + for i in range(X.shape[0]): + # scipy has not yet implemented 1D sparse slices; once implemented this can + # be removed and `arr[ind]` can be simply used. + x = X[[i], :] if issparse(X) else X[i] + out[i, i] = metric(x, x, **kwds) + + else: + # Calculate all cells + out = np.empty((X.shape[0], Y.shape[0]), dtype="float") + iterator = itertools.product(range(X.shape[0]), range(Y.shape[0])) + for i, j in iterator: + # scipy has not yet implemented 1D sparse slices; once implemented this can + # be removed and `arr[ind]` can be simply used. 
+ x = X[[i], :] if issparse(X) else X[i] + y = Y[[j], :] if issparse(Y) else Y[j] + out[i, j] = metric(x, y, **kwds) + + return out + + +def _check_chunk_size(reduced, chunk_size): + """Checks chunk is a sequence of expected size or a tuple of same.""" + if reduced is None: + return + is_tuple = isinstance(reduced, tuple) + if not is_tuple: + reduced = (reduced,) + if any(isinstance(r, tuple) or not hasattr(r, "__iter__") for r in reduced): + raise TypeError( + "reduce_func returned %r. Expected sequence(s) of length %d." + % (reduced if is_tuple else reduced[0], chunk_size) + ) + if any(_num_samples(r) != chunk_size for r in reduced): + actual_size = tuple(_num_samples(r) for r in reduced) + raise ValueError( + "reduce_func returned object of length %s. " + "Expected same length as input: %d." + % (actual_size if is_tuple else actual_size[0], chunk_size) + ) + + +def _precompute_metric_params(X, Y, metric=None, **kwds): + """Precompute data-derived metric parameters if not provided.""" + if metric == "seuclidean" and "V" not in kwds: + if X is Y: + V = np.var(X, axis=0, ddof=1) + else: + raise ValueError( + "The 'V' parameter is required for the seuclidean metric " + "when Y is passed." + ) + return {"V": V} + if metric == "mahalanobis" and "VI" not in kwds: + if X is Y: + VI = np.linalg.inv(np.cov(X.T)).T + else: + raise ValueError( + "The 'VI' parameter is required for the mahalanobis metric " + "when Y is passed." + ) + return {"VI": VI} + return {} + + +@validate_params( + { + "X": ["array-like", "sparse matrix"], + "Y": ["array-like", "sparse matrix", None], + "reduce_func": [callable, None], + "metric": [StrOptions({"precomputed"}.union(_VALID_METRICS)), callable], + "n_jobs": [Integral, None], + "working_memory": [Interval(Real, 0, None, closed="left"), None], + }, + prefer_skip_nested_validation=False, # metric is not validated yet +) +def pairwise_distances_chunked( + X, + Y=None, + *, + reduce_func=None, + metric="euclidean", + n_jobs=None, + working_memory=None, + **kwds, +): + """Generate a distance matrix chunk by chunk with optional reduction. + + In cases where not all of a pairwise distance matrix needs to be + stored at once, this is used to calculate pairwise distances in + ``working_memory``-sized chunks. If ``reduce_func`` is given, it is + run on each chunk and its return values are concatenated into lists, + arrays or sparse matrices. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples_X, n_samples_X) or \ + (n_samples_X, n_features) + Array of pairwise distances between samples, or a feature array. + The shape the array should be (n_samples_X, n_samples_X) if + metric='precomputed' and (n_samples_X, n_features) otherwise. + + Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features), default=None + An optional second feature array. Only allowed if + metric != "precomputed". + + reduce_func : callable, default=None + The function which is applied on each chunk of the distance matrix, + reducing it to needed values. ``reduce_func(D_chunk, start)`` + is called repeatedly, where ``D_chunk`` is a contiguous vertical + slice of the pairwise distance matrix, starting at row ``start``. + It should return one of: None; an array, a list, or a sparse matrix + of length ``D_chunk.shape[0]``; or a tuple of such objects. + Returning None is useful for in-place operations, rather than + reductions. + + If None, pairwise_distances_chunked returns a generator of vertical + chunks of the distance matrix. 
+ + metric : str or callable, default='euclidean' + The metric to use when calculating distance between instances in a + feature array. If metric is a string, it must be one of the options + allowed by scipy.spatial.distance.pdist for its metric parameter, + or a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS. + If metric is "precomputed", X is assumed to be a distance matrix. + Alternatively, if metric is a callable function, it is called on + each pair of instances (rows) and the resulting value recorded. + The callable should take two arrays from X as input and return a + value indicating the distance between them. + + n_jobs : int, default=None + The number of jobs to use for the computation. This works by + breaking down the pairwise matrix into n_jobs even slices and + computing them in parallel. + + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + working_memory : float, default=None + The sought maximum memory for temporary distance matrix chunks. + When None (default), the value of + ``sklearn.get_config()['working_memory']`` is used. + + **kwds : optional keyword parameters + Any further parameters are passed directly to the distance function. + If using a scipy.spatial.distance metric, the parameters are still + metric dependent. See the scipy docs for usage examples. + + Yields + ------ + D_chunk : {ndarray, sparse matrix} + A contiguous slice of distance matrix, optionally processed by + ``reduce_func``. + + Examples + -------- + Without reduce_func: + + >>> import numpy as np + >>> from sklearn.metrics import pairwise_distances_chunked + >>> X = np.random.RandomState(0).rand(5, 3) + >>> D_chunk = next(pairwise_distances_chunked(X)) + >>> D_chunk + array([[0. ..., 0.29..., 0.41..., 0.19..., 0.57...], + [0.29..., 0. ..., 0.57..., 0.41..., 0.76...], + [0.41..., 0.57..., 0. ..., 0.44..., 0.90...], + [0.19..., 0.41..., 0.44..., 0. ..., 0.51...], + [0.57..., 0.76..., 0.90..., 0.51..., 0. ...]]) + + Retrieve all neighbors and average distance within radius r: + + >>> r = .2 + >>> def reduce_func(D_chunk, start): + ... neigh = [np.flatnonzero(d < r) for d in D_chunk] + ... avg_dist = (D_chunk * (D_chunk < r)).mean(axis=1) + ... return neigh, avg_dist + >>> gen = pairwise_distances_chunked(X, reduce_func=reduce_func) + >>> neigh, avg_dist = next(gen) + >>> neigh + [array([0, 3]), array([1]), array([2]), array([0, 3]), array([4])] + >>> avg_dist + array([0.039..., 0. , 0. , 0.039..., 0. ]) + + Where r is defined per sample, we need to make use of ``start``: + + >>> r = [.2, .4, .4, .3, .1] + >>> def reduce_func(D_chunk, start): + ... neigh = [np.flatnonzero(d < r[i]) + ... for i, d in enumerate(D_chunk, start)] + ... return neigh + >>> neigh = next(pairwise_distances_chunked(X, reduce_func=reduce_func)) + >>> neigh + [array([0, 3]), array([0, 1]), array([2]), array([0, 3]), array([4])] + + Force row-by-row generation by reducing ``working_memory``: + + >>> gen = pairwise_distances_chunked(X, reduce_func=reduce_func, + ... working_memory=0) + >>> next(gen) + [array([0, 3])] + >>> next(gen) + [array([0, 1])] + """ + n_samples_X = _num_samples(X) + if metric == "precomputed": + slices = (slice(0, n_samples_X),) + else: + if Y is None: + Y = X + # We get as many rows as possible within our working_memory budget to + # store len(Y) distances in each row of output. + # + # Note: + # - this will get at least 1 row, even if 1 row of distances will + # exceed working_memory. 
+ # - this does not account for any temporary memory usage while + # calculating distances (e.g. difference of vectors in manhattan + # distance. + chunk_n_rows = get_chunk_n_rows( + row_bytes=8 * _num_samples(Y), + max_n_rows=n_samples_X, + working_memory=working_memory, + ) + slices = gen_batches(n_samples_X, chunk_n_rows) + + # precompute data-derived metric params + params = _precompute_metric_params(X, Y, metric=metric, **kwds) + kwds.update(**params) + + for sl in slices: + if sl.start == 0 and sl.stop == n_samples_X: + X_chunk = X # enable optimised paths for X is Y + else: + X_chunk = X[sl] + D_chunk = pairwise_distances(X_chunk, Y, metric=metric, n_jobs=n_jobs, **kwds) + if (X is Y or Y is None) and PAIRWISE_DISTANCE_FUNCTIONS.get( + metric, None + ) is euclidean_distances: + # zeroing diagonal, taking care of aliases of "euclidean", + # i.e. "l2" + D_chunk.flat[sl.start :: _num_samples(X) + 1] = 0 + if reduce_func is not None: + chunk_size = D_chunk.shape[0] + D_chunk = reduce_func(D_chunk, sl.start) + _check_chunk_size(D_chunk, chunk_size) + yield D_chunk + + +@validate_params( + { + "X": ["array-like", "sparse matrix"], + "Y": ["array-like", "sparse matrix", None], + "metric": [StrOptions(set(_VALID_METRICS) | {"precomputed"}), callable], + "n_jobs": [Integral, None], + "force_all_finite": ["boolean", StrOptions({"allow-nan"})], + }, + prefer_skip_nested_validation=True, +) +def pairwise_distances( + X, Y=None, metric="euclidean", *, n_jobs=None, force_all_finite=True, **kwds +): + """Compute the distance matrix from a vector array X and optional Y. + + This method takes either a vector array or a distance matrix, and returns + a distance matrix. If the input is a vector array, the distances are + computed. If the input is a distances matrix, it is returned instead. + + This method provides a safe way to take a distance matrix as input, while + preserving compatibility with many other algorithms that take a vector + array. + + If Y is given (default is None), then the returned matrix is the pairwise + distance between the arrays from both X and Y. + + Valid values for metric are: + + - From scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2', + 'manhattan']. These metrics support sparse matrix + inputs. + ['nan_euclidean'] but it does not yet support sparse matrices. + + - From scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev', + 'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis', + 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean', + 'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule'] + See the documentation for scipy.spatial.distance for details on these + metrics. These metrics do not support sparse matrix inputs. + + .. note:: + `'kulsinski'` is deprecated from SciPy 1.9 and will be removed in SciPy 1.11. + + .. note:: + `'matching'` has been removed in SciPy 1.9 (use `'hamming'` instead). + + Note that in the case of 'cityblock', 'cosine' and 'euclidean' (which are + valid scipy.spatial.distance metrics), the scikit-learn implementation + will be used, which is faster and has support for sparse matrices (except + for 'cityblock'). For a verbose description of the metrics from + scikit-learn, see :func:`sklearn.metrics.pairwise.distance_metrics` + function. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples_X, n_samples_X) or \ + (n_samples_X, n_features) + Array of pairwise distances between samples, or a feature array. 
+ The shape of the array should be (n_samples_X, n_samples_X) if + metric == "precomputed" and (n_samples_X, n_features) otherwise. + + Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features), default=None + An optional second feature array. Only allowed if + metric != "precomputed". + + metric : str or callable, default='euclidean' + The metric to use when calculating distance between instances in a + feature array. If metric is a string, it must be one of the options + allowed by scipy.spatial.distance.pdist for its metric parameter, or + a metric listed in ``pairwise.PAIRWISE_DISTANCE_FUNCTIONS``. + If metric is "precomputed", X is assumed to be a distance matrix. + Alternatively, if metric is a callable function, it is called on each + pair of instances (rows) and the resulting value recorded. The callable + should take two arrays from X as input and return a value indicating + the distance between them. + + n_jobs : int, default=None + The number of jobs to use for the computation. This works by breaking + down the pairwise matrix into n_jobs even slices and computing them in + parallel. + + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + force_all_finite : bool or 'allow-nan', default=True + Whether to raise an error on np.inf, np.nan, pd.NA in array. Ignored + for a metric listed in ``pairwise.PAIRWISE_DISTANCE_FUNCTIONS``. The + possibilities are: + + - True: Force all values of array to be finite. + - False: accepts np.inf, np.nan, pd.NA in array. + - 'allow-nan': accepts only np.nan and pd.NA values in array. Values + cannot be infinite. + + .. versionadded:: 0.22 + ``force_all_finite`` accepts the string ``'allow-nan'``. + + .. versionchanged:: 0.23 + Accepts `pd.NA` and converts it into `np.nan`. + + **kwds : optional keyword parameters + Any further parameters are passed directly to the distance function. + If using a scipy.spatial.distance metric, the parameters are still + metric dependent. See the scipy docs for usage examples. + + Returns + ------- + D : ndarray of shape (n_samples_X, n_samples_X) or \ + (n_samples_X, n_samples_Y) + A distance matrix D such that D_{i, j} is the distance between the + ith and jth vectors of the given matrix X, if Y is None. + If Y is not None, then D_{i, j} is the distance between the ith array + from X and the jth array from Y. + + See Also + -------- + pairwise_distances_chunked : Performs the same calculation as this + function, but returns a generator of chunks of the distance matrix, in + order to limit memory usage. + sklearn.metrics.pairwise.paired_distances : Computes the distances between + corresponding elements of two arrays. + + Examples + -------- + >>> from sklearn.metrics.pairwise import pairwise_distances + >>> X = [[0, 0, 0], [1, 1, 1]] + >>> Y = [[1, 0, 0], [1, 1, 0]] + >>> pairwise_distances(X, Y, metric='sqeuclidean') + array([[1., 2.], + [2., 1.]]) + """ + if metric == "precomputed": + X, _ = check_pairwise_arrays( + X, Y, precomputed=True, force_all_finite=force_all_finite + ) + + whom = ( + "`pairwise_distances`. Precomputed distance " + " need to have non-negative values." 
+ ) + check_non_negative(X, whom=whom) + return X + elif metric in PAIRWISE_DISTANCE_FUNCTIONS: + func = PAIRWISE_DISTANCE_FUNCTIONS[metric] + elif callable(metric): + func = partial( + _pairwise_callable, metric=metric, force_all_finite=force_all_finite, **kwds + ) + else: + if issparse(X) or issparse(Y): + raise TypeError("scipy distance metrics do not support sparse matrices.") + + dtype = bool if metric in PAIRWISE_BOOLEAN_FUNCTIONS else None + + if dtype == bool and (X.dtype != bool or (Y is not None and Y.dtype != bool)): + msg = "Data was converted to boolean for metric %s" % metric + warnings.warn(msg, DataConversionWarning) + + X, Y = check_pairwise_arrays( + X, Y, dtype=dtype, force_all_finite=force_all_finite + ) + + # precompute data-derived metric params + params = _precompute_metric_params(X, Y, metric=metric, **kwds) + kwds.update(**params) + + if effective_n_jobs(n_jobs) == 1 and X is Y: + return distance.squareform(distance.pdist(X, metric=metric, **kwds)) + func = partial(distance.cdist, metric=metric, **kwds) + + return _parallel_pairwise(X, Y, func, n_jobs, **kwds) + + +# These distances require boolean arrays, when using scipy.spatial.distance +PAIRWISE_BOOLEAN_FUNCTIONS = [ + "dice", + "jaccard", + "rogerstanimoto", + "russellrao", + "sokalmichener", + "sokalsneath", + "yule", +] +if sp_base_version < parse_version("1.11"): + # Deprecated in SciPy 1.9 and removed in SciPy 1.11 + PAIRWISE_BOOLEAN_FUNCTIONS += ["kulsinski"] +if sp_base_version < parse_version("1.9"): + # Deprecated in SciPy 1.0 and removed in SciPy 1.9 + PAIRWISE_BOOLEAN_FUNCTIONS += ["matching"] + +# Helper functions - distance +PAIRWISE_KERNEL_FUNCTIONS = { + # If updating this dictionary, update the doc in both distance_metrics() + # and also in pairwise_distances()! + "additive_chi2": additive_chi2_kernel, + "chi2": chi2_kernel, + "linear": linear_kernel, + "polynomial": polynomial_kernel, + "poly": polynomial_kernel, + "rbf": rbf_kernel, + "laplacian": laplacian_kernel, + "sigmoid": sigmoid_kernel, + "cosine": cosine_similarity, +} + + +def kernel_metrics(): + """Valid metrics for pairwise_kernels. + + This function simply returns the valid pairwise distance metrics. + It exists, however, to allow for a verbose description of the mapping for + each of the valid strings. + + The valid distance metrics, and the function they map to, are: + =============== ======================================== + metric Function + =============== ======================================== + 'additive_chi2' sklearn.pairwise.additive_chi2_kernel + 'chi2' sklearn.pairwise.chi2_kernel + 'linear' sklearn.pairwise.linear_kernel + 'poly' sklearn.pairwise.polynomial_kernel + 'polynomial' sklearn.pairwise.polynomial_kernel + 'rbf' sklearn.pairwise.rbf_kernel + 'laplacian' sklearn.pairwise.laplacian_kernel + 'sigmoid' sklearn.pairwise.sigmoid_kernel + 'cosine' sklearn.pairwise.cosine_similarity + =============== ======================================== + + Read more in the :ref:`User Guide `. + + Returns + ------- + kernel_metrics : dict + Returns valid metrics for pairwise_kernels. 
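+
+ Examples
+ --------
+ A minimal illustrative sketch of the mapping, using only the public
+ kernel functions defined in this module:
+
+ >>> from sklearn.metrics.pairwise import kernel_metrics, rbf_kernel
+ >>> kernel_metrics()["rbf"] is rbf_kernel
+ True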
+ """ + return PAIRWISE_KERNEL_FUNCTIONS + + +KERNEL_PARAMS = { + "additive_chi2": (), + "chi2": frozenset(["gamma"]), + "cosine": (), + "linear": (), + "poly": frozenset(["gamma", "degree", "coef0"]), + "polynomial": frozenset(["gamma", "degree", "coef0"]), + "rbf": frozenset(["gamma"]), + "laplacian": frozenset(["gamma"]), + "sigmoid": frozenset(["gamma", "coef0"]), +} + + +@validate_params( + { + "X": ["array-like", "sparse matrix"], + "Y": ["array-like", "sparse matrix", None], + "metric": [ + StrOptions(set(PAIRWISE_KERNEL_FUNCTIONS) | {"precomputed"}), + callable, + ], + "filter_params": ["boolean"], + "n_jobs": [Integral, None], + }, + prefer_skip_nested_validation=True, +) +def pairwise_kernels( + X, Y=None, metric="linear", *, filter_params=False, n_jobs=None, **kwds +): + """Compute the kernel between arrays X and optional array Y. + + This method takes either a vector array or a kernel matrix, and returns + a kernel matrix. If the input is a vector array, the kernels are + computed. If the input is a kernel matrix, it is returned instead. + + This method provides a safe way to take a kernel matrix as input, while + preserving compatibility with many other algorithms that take a vector + array. + + If Y is given (default is None), then the returned matrix is the pairwise + kernel between the arrays from both X and Y. + + Valid values for metric are: + ['additive_chi2', 'chi2', 'linear', 'poly', 'polynomial', 'rbf', + 'laplacian', 'sigmoid', 'cosine'] + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples_X, n_samples_X) or \ + (n_samples_X, n_features) + Array of pairwise kernels between samples, or a feature array. + The shape of the array should be (n_samples_X, n_samples_X) if + metric == "precomputed" and (n_samples_X, n_features) otherwise. + + Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features), default=None + A second feature array only if X has shape (n_samples_X, n_features). + + metric : str or callable, default="linear" + The metric to use when calculating kernel between instances in a + feature array. If metric is a string, it must be one of the metrics + in ``pairwise.PAIRWISE_KERNEL_FUNCTIONS``. + If metric is "precomputed", X is assumed to be a kernel matrix. + Alternatively, if metric is a callable function, it is called on each + pair of instances (rows) and the resulting value recorded. The callable + should take two rows from X as input and return the corresponding + kernel value as a single number. This means that callables from + :mod:`sklearn.metrics.pairwise` are not allowed, as they operate on + matrices, not single samples. Use the string identifying the kernel + instead. + + filter_params : bool, default=False + Whether to filter invalid parameters or not. + + n_jobs : int, default=None + The number of jobs to use for the computation. This works by breaking + down the pairwise matrix into n_jobs even slices and computing them in + parallel. + + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + **kwds : optional keyword parameters + Any further parameters are passed directly to the kernel function. + + Returns + ------- + K : ndarray of shape (n_samples_X, n_samples_X) or (n_samples_X, n_samples_Y) + A kernel matrix K such that K_{i, j} is the kernel between the + ith and jth vectors of the given matrix X, if Y is None. 
+ If Y is not None, then K_{i, j} is the kernel between the ith array + from X and the jth array from Y. + + Notes + ----- + If metric is 'precomputed', Y is ignored and X is returned. + + Examples + -------- + >>> from sklearn.metrics.pairwise import pairwise_kernels + >>> X = [[0, 0, 0], [1, 1, 1]] + >>> Y = [[1, 0, 0], [1, 1, 0]] + >>> pairwise_kernels(X, Y, metric='linear') + array([[0., 0.], + [1., 2.]]) + """ + # import GPKernel locally to prevent circular imports + from ..gaussian_process.kernels import Kernel as GPKernel + + if metric == "precomputed": + X, _ = check_pairwise_arrays(X, Y, precomputed=True) + return X + elif isinstance(metric, GPKernel): + func = metric.__call__ + elif metric in PAIRWISE_KERNEL_FUNCTIONS: + if filter_params: + kwds = {k: kwds[k] for k in kwds if k in KERNEL_PARAMS[metric]} + func = PAIRWISE_KERNEL_FUNCTIONS[metric] + elif callable(metric): + func = partial(_pairwise_callable, metric=metric, **kwds) + + return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
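+
+
+ # A minimal usage sketch of the public entry points defined above
+ # (`pairwise_distances`, `pairwise_distances_chunked`, `pairwise_kernels`).
+ # This is an illustrative demo, not part of the library's API surface: it
+ # assumes only NumPy and runs only when the module is executed directly,
+ # so importing the module is unaffected.
+ if __name__ == "__main__":
+     import numpy as np
+
+     rng = np.random.RandomState(0)
+     X_demo = rng.rand(6, 3)
+
+     # Full distance matrix with a scikit-learn metric, split over two jobs.
+     D = pairwise_distances(X_demo, metric="manhattan", n_jobs=2)
+     assert D.shape == (6, 6)
+
+     # Chunked computation with a reduction: index of each row's nearest
+     # neighbour, excluding the row itself.
+     def _nearest(D_chunk, start):
+         D_chunk = D_chunk.copy()
+         rows = np.arange(D_chunk.shape[0])
+         D_chunk[rows, start + rows] = np.inf  # mask the self-distance
+         return D_chunk.argmin(axis=1)
+
+     gen = pairwise_distances_chunked(X_demo, reduce_func=_nearest, working_memory=0)
+     neighbours = np.concatenate(list(gen))
+     assert neighbours.shape == (6,)
+
+     # Kernel matrix; with `filter_params=True`, keyword arguments a kernel
+     # does not accept (here `degree` for the RBF kernel) are dropped.
+     K = pairwise_kernels(X_demo, metric="rbf", filter_params=True, gamma=0.5, degree=3)
+     assert K.shape == (6, 6)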