peacock-data-public-datasets-idc-cronscript/venv/lib/python3.10/site-packages/sklearn/utils/_plotting.py
import numpy as np

from . import check_consistent_length, check_matplotlib_support
from ._response import _get_response_values_binary
from .multiclass import type_of_target
from .validation import _check_pos_label_consistency


class _BinaryClassifierCurveDisplayMixin:
    """Mixin class to be used in Displays requiring a binary classifier.

    The aim of this class is to centralize some validations regarding the
    estimator and the target and gather the response of the estimator.
    """

    def _validate_plot_params(self, *, ax=None, name=None):
        check_matplotlib_support(f"{self.__class__.__name__}.plot")
        import matplotlib.pyplot as plt

        if ax is None:
            _, ax = plt.subplots()

        name = self.estimator_name if name is None else name
        return ax, ax.figure, name

    @classmethod
    def _validate_and_get_response_values(
        cls, estimator, X, y, *, response_method="auto", pos_label=None, name=None
    ):
        check_matplotlib_support(f"{cls.__name__}.from_estimator")

        name = estimator.__class__.__name__ if name is None else name

        y_pred, pos_label = _get_response_values_binary(
            estimator,
            X,
            response_method=response_method,
            pos_label=pos_label,
        )

        return y_pred, pos_label, name

    @classmethod
    def _validate_from_predictions_params(
        cls, y_true, y_pred, *, sample_weight=None, pos_label=None, name=None
    ):
        check_matplotlib_support(f"{cls.__name__}.from_predictions")

        if type_of_target(y_true) != "binary":
            raise ValueError(
                f"The target y is not binary. Got {type_of_target(y_true)} type of"
                " target."
            )

        check_consistent_length(y_true, y_pred, sample_weight)
        pos_label = _check_pos_label_consistency(pos_label, y_true)

        name = name if name is not None else "Classifier"

        return pos_label, name
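

# A hedged sketch (hypothetical class, not part of scikit-learn) of how a
# Display subclass is expected to consume this mixin: `from_predictions`
# delegates target and pos_label validation to the mixin, and `plot` would
# resolve the axes, figure, and legend name. Runnable only with matplotlib
# installed, e.g. via `python -m sklearn.utils._plotting`.
if __name__ == "__main__":

    class _DemoCurveDisplay(_BinaryClassifierCurveDisplayMixin):
        def __init__(self, *, estimator_name=None):
            self.estimator_name = estimator_name

        @classmethod
        def from_predictions(cls, y_true, y_pred, **kwargs):
            pos_label, name = cls._validate_from_predictions_params(
                y_true, y_pred, **kwargs
            )
            return cls(estimator_name=name)

    display = _DemoCurveDisplay.from_predictions(
        [0, 1, 1, 0], [0.1, 0.8, 0.7, 0.3]
    )
    ax, figure, name = display._validate_plot_params()
    assert name == "Classifier"  # the default name when none is given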


def _validate_score_name(score_name, scoring, negate_score):
    """Validate the `score_name` parameter.

    If `score_name` is provided, we just return it as-is.

    If `score_name` is `None` and `scoring` is `None`, we use `"Score"` if
    `negate_score` is `False` and `"Negative score"` otherwise.

    If `scoring` is a string or a callable, we infer the name from it. We
    replace `_` by spaces and capitalize the first letter. We remove `neg_`
    and replace it by `"Negative"` if `negate_score` is `False`, or just
    remove it otherwise.
    """
    if score_name is not None:
        return score_name
    elif scoring is None:
        return "Negative score" if negate_score else "Score"
    else:
        score_name = scoring.__name__ if callable(scoring) else scoring
        if negate_score:
            if score_name.startswith("neg_"):
                score_name = score_name[4:]
            else:
                score_name = f"Negative {score_name}"
        elif score_name.startswith("neg_"):
            score_name = f"Negative {score_name[4:]}"
        score_name = score_name.replace("_", " ")
        return score_name.capitalize()
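

# A usage sketch (illustrative, not part of the original module) tracing the
# name inference above for representative inputs.
if __name__ == "__main__":
    # An explicit score_name is always returned unchanged.
    assert _validate_score_name("Accuracy", None, False) == "Accuracy"
    # Without a scorer, the name falls back to a generic label.
    assert _validate_score_name(None, None, True) == "Negative score"
    # Negating a "neg_*" scorer cancels the prefix...
    assert (
        _validate_score_name(None, "neg_mean_squared_error", True)
        == "Mean squared error"
    )
    # ...while without negation, "neg_" is spelled out as "Negative".
    assert (
        _validate_score_name(None, "neg_mean_squared_error", False)
        == "Negative mean squared error"
    )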


def _interval_max_min_ratio(data):
    """Compute the ratio between the largest and smallest inter-point distances.

    A value larger than 5 typically indicates that the parameter range would
    better be displayed with a log scale while a linear scale would be more
    suitable otherwise.
    """
    diff = np.diff(np.sort(data))
    return diff.max() / diff.min()
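

# An illustrative check (not part of the original module) of the heuristic:
# an evenly spaced grid has equal gaps (ratio 1.0, below the threshold of 5),
# while a log-spaced grid has gaps growing tenfold (ratio 100.0). Note that
# duplicate values in `data` would make `diff.min()` zero and the ratio inf.
if __name__ == "__main__":
    assert _interval_max_min_ratio(np.linspace(0, 1, 5)) < 5  # linear scale fits
    assert _interval_max_min_ratio(np.logspace(-3, 0, 4)) > 5  # log scale fits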